author     Vishvananda Ishaya <vishvananda@gmail.com>  2011-08-17 19:06:21 -0700
committer  Vishvananda Ishaya <vishvananda@gmail.com>  2011-08-17 19:06:21 -0700
commit     dd749989dd30fdc49af7e4af453e91faf3914b75 (patch)
tree       188081c77496e1897120f7263a17737bb2929c5d
parent     93207c19c72aff5eb2c99b0b42649a75def35cf0 (diff)
parent     aca07a42fabb7f506cf132b995b4ad0139987b02 (diff)
download   nova-dd749989dd30fdc49af7e4af453e91faf3914b75.tar.gz
           nova-dd749989dd30fdc49af7e4af453e91faf3914b75.tar.xz
           nova-dd749989dd30fdc49af7e4af453e91faf3914b75.zip

merged trunk
-rw-r--r--  Authors                                                   3
-rwxr-xr-x  bin/clear_rabbit_queues                                  73
-rwxr-xr-x  bin/nova-dhcpbridge                                      29
-rwxr-xr-x  bin/nova-manage                                          62
-rw-r--r--  nova/api/ec2/__init__.py                                  4
-rw-r--r--  nova/api/ec2/cloud.py                                     2
-rw-r--r--  nova/api/openstack/common.py                             15
-rw-r--r--  nova/api/openstack/contrib/floating_ips.py               13
-rw-r--r--  nova/api/openstack/contrib/quotas.py                    100
-rw-r--r--  nova/api/openstack/create_instance_helper.py              4
-rw-r--r--  nova/api/openstack/servers.py                            29
-rw-r--r--  nova/api/openstack/views/servers.py                       8
-rw-r--r--  nova/compute/api.py                                      14
-rw-r--r--  nova/compute/manager.py                                 193
-rw-r--r--  nova/db/api.py                                           21
-rw-r--r--  nova/db/sqlalchemy/api.py                                66
-rw-r--r--  nova/db/sqlalchemy/models.py                             21
-rw-r--r--  nova/db/sqlalchemy/session.py                            90
-rw-r--r--  nova/exception.py                                        13
-rw-r--r--  nova/flags.py                                             7
-rw-r--r--  nova/image/s3.py                                         26
-rw-r--r--  nova/network/linux_net.py                               358
-rw-r--r--  nova/network/manager.py                                 164
-rw-r--r--  nova/rpc/amqp.py                                          8
-rw-r--r--  nova/scheduler/abstract_scheduler.py (renamed from nova/scheduler/zone_aware_scheduler.py)  32
-rw-r--r--  nova/scheduler/driver.py                                157
-rw-r--r--  nova/scheduler/host_filter.py                            51
-rw-r--r--  nova/scheduler/least_cost.py                              7
-rw-r--r--  nova/scheduler/manager.py                                63
-rw-r--r--  nova/scheduler/multi.py                                  73
-rw-r--r--  nova/tests/api/openstack/contrib/test_floating_ips.py   18
-rw-r--r--  nova/tests/api/openstack/contrib/test_keypairs.py       19
-rw-r--r--  nova/tests/api/openstack/contrib/test_quotas.py        152
-rw-r--r--  nova/tests/api/openstack/test_common.py                   4
-rw-r--r--  nova/tests/api/openstack/test_extensions.py             122
-rw-r--r--  nova/tests/api/openstack/test_limits.py                 178
-rw-r--r--  nova/tests/api/openstack/test_server_actions.py          20
-rw-r--r--  nova/tests/api/openstack/test_servers.py                 48
-rw-r--r--  nova/tests/glance/stubs.py                                6
-rw-r--r--  nova/tests/integrated/test_servers.py                     5
-rw-r--r--  nova/tests/scheduler/test_abstract_scheduler.py (renamed from nova/tests/scheduler/test_zone_aware_scheduler.py)  40
-rw-r--r--  nova/tests/scheduler/test_least_cost_scheduler.py         4
-rw-r--r--  nova/tests/scheduler/test_scheduler.py                   83
-rw-r--r--  nova/tests/test_compute.py                               97
-rw-r--r--  nova/tests/test_libvirt.py                              236
-rw-r--r--  nova/tests/test_metadata.py                               2
-rw-r--r--  nova/tests/test_network.py                              198
-rw-r--r--  nova/tests/test_xenapi.py                                 5
-rw-r--r--  nova/tests/xenapi/stubs.py                                4
-rw-r--r--  nova/virt/driver.py                                       2
-rw-r--r--  nova/virt/fake.py                                         6
-rw-r--r--  nova/virt/libvirt/connection.py                         230
-rw-r--r--  nova/virt/libvirt/firewall.py                           123
-rw-r--r--  nova/virt/libvirt/netutils.py                            67
-rw-r--r--  nova/virt/libvirt/vif.py                                  8
-rw-r--r--  nova/virt/xenapi/fake.py                                 21
-rw-r--r--  nova/virt/xenapi/vm_utils.py                            184
-rw-r--r--  nova/virt/xenapi/vmops.py                                62
-rw-r--r--  nova/virt/xenapi_conn.py                                  4
-rw-r--r--  smoketests/openwrt-x86-ext2.image                       bin 4612608 -> 0 bytes
-rw-r--r--  smoketests/openwrt-x86-vmlinuz                          bin 1169948 -> 0 bytes
-rw-r--r--  smoketests/random.image                                 bin 0 -> 65536 bytes
-rw-r--r--  smoketests/random.kernel                                bin 0 -> 16384 bytes
-rw-r--r--  smoketests/test_netadmin.py                              19
-rw-r--r--  smoketests/test_sysadmin.py                               4
65 files changed, 2650 insertions(+), 1027 deletions(-)
diff --git a/Authors b/Authors
index e639cbf76..02fe46c79 100644
--- a/Authors
+++ b/Authors
@@ -27,6 +27,7 @@ David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devendra Modium <dmodium@isi.edu>
Devin Carlen <devin.carlen@gmail.com>
+Donal Lafferty <donal.lafferty@citrix.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <reldan@oscloud.ru>
Eric Day <eday@oddments.org>
@@ -58,6 +59,7 @@ Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Justin Shepherd <jshepher@rackspace.com>
Kei Masumoto <masumotok@nttdata.co.jp>
+masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
@@ -103,6 +105,7 @@ Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
Vivek Y S <vivek.ys@gmail.com>
+Vladimir Popovski <vladimir@zadarastorage.com>
William Wolf <throughnothing@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
diff --git a/bin/clear_rabbit_queues b/bin/clear_rabbit_queues
new file mode 100755
index 000000000..7a000e5d8
--- /dev/null
+++ b/bin/clear_rabbit_queues
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Admin/debug script to wipe rabbitMQ (AMQP) queues nova uses.
+ This can be used if you need to change durable options on queues,
+ or to wipe all messages in the queue system if things are in a
+ serious bad way.
+
+"""
+
+import datetime
+import gettext
+import os
+import sys
+import time
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
+ sys.path.insert(0, POSSIBLE_TOPDIR)
+
+gettext.install('nova', unicode=1)
+
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('delete_exchange', False, 'delete nova exchange too.')
+
+
+def delete_exchange(exch):
+ conn = rpc.create_connection()
+ x = conn.get_channel()
+ x.exchange_delete(exch)
+
+
+def delete_queues(queues):
+ conn = rpc.create_connection()
+ x = conn.get_channel()
+ for q in queues:
+ x.queue_delete(q)
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ args = flags.FLAGS(sys.argv)
+ logging.setup()
+ delete_queues(args[1:])
+ if FLAGS.delete_exchange:
+ delete_exchange(FLAGS.control_exchange)
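
Editor's note: as a rough usage sketch of the script above (the queue names
are illustrative, not part of the patch), the queues to wipe are passed as
positional arguments, with --delete_exchange optionally dropping the control
exchange as well:

    # Hypothetical invocation (queue names are illustrative):
    #   ./bin/clear_rabbit_queues compute.host1 network scheduler --delete_exchange
    # which inside the script reduces to:
    delete_queues(['compute.host1', 'network', 'scheduler'])
    if FLAGS.delete_exchange:
        delete_exchange(FLAGS.control_exchange)  # 'nova' unless overridden
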
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 325642d52..a47ea7a76 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -48,12 +48,11 @@ flags.DECLARE('auth_driver', 'nova.auth.manager')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
-flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface')
LOG = logging.getLogger('nova.dhcpbridge')
-def add_lease(mac, ip_address, _hostname, _interface):
+def add_lease(mac, ip_address, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
LOG.debug(_("leasing ip"))
@@ -67,13 +66,13 @@ def add_lease(mac, ip_address, _hostname, _interface):
"args": {"address": ip_address}})
-def old_lease(mac, ip_address, hostname, interface):
+def old_lease(mac, ip_address, interface):
"""Update just as add lease."""
- LOG.debug(_("Adopted old lease or got a change of mac/hostname"))
- add_lease(mac, ip_address, hostname, interface)
+ LOG.debug(_("Adopted old lease or got a change of mac"))
+ add_lease(mac, ip_address, interface)
-def del_lease(mac, ip_address, _hostname, _interface):
+def del_lease(mac, ip_address, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
LOG.debug(_("releasing ip"))
@@ -87,10 +86,10 @@ def del_lease(mac, ip_address, _hostname, _interface):
"args": {"address": ip_address}})
-def init_leases(interface):
- """Get the list of hosts for an interface."""
+def init_leases(network_id):
+ """Get the list of hosts for a network."""
ctxt = context.get_admin_context()
- network_ref = db.network_get_by_bridge(ctxt, interface)
+ network_ref = db.network_get(ctxt, network_id)
return linux_net.get_dhcp_leases(ctxt, network_ref)
@@ -101,7 +100,8 @@ def main():
argv = FLAGS(sys.argv)
logging.setup()
# check ENV first so we don't break any older deploys
- interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
+ network_id = int(os.environ.get('NETWORK_ID'))
+
if int(os.environ.get('TESTING', '0')):
from nova.tests import fake_flags
@@ -115,13 +115,12 @@ def main():
if action in ['add', 'del', 'old']:
mac = argv[2]
ip = argv[3]
- hostname = argv[4]
- msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and"
- " hostname %(hostname)s on interface %(interface)s") % locals()
+ msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s"
+ " on interface %(interface)s") % locals()
LOG.debug(msg)
- globals()[action + '_lease'](mac, ip, hostname, interface)
+ globals()[action + '_lease'](mac, ip, interface)
else:
- print init_leases(interface)
+ print init_leases(network_id)
if __name__ == "__main__":
main()
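
Editor's note: the dispatch in main() leans on dnsmasq's --dhcp-script
contract, where the helper is executed as `script add|old|del <mac> <ip>`
with extra data passed via environment variables. A minimal sketch of the
same globals()-based dispatch, with illustrative values:

    # Minimal sketch of nova-dhcpbridge's action dispatch (values illustrative).
    def add_lease(mac, ip, interface):
        print 'add %s %s on %s' % (mac, ip, interface)

    def del_lease(mac, ip, interface):
        print 'del %s %s on %s' % (mac, ip, interface)

    old_lease = add_lease  # 'old' leases are re-adopted like 'add'

    action, mac, ip = 'add', '02:16:3e:00:00:01', '10.0.0.2'
    globals()[action + '_lease'](mac, ip, 'br100')
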
diff --git a/bin/nova-manage b/bin/nova-manage
index 077a89d6f..8e6419c0b 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -719,7 +719,7 @@ class NetworkCommands(object):
# sanitize other input using FLAGS if necessary
if not num_networks:
num_networks = FLAGS.num_networks
- if not network_size:
+ if not network_size and fixed_range_v4:
fixnet = netaddr.IPNetwork(fixed_range_v4)
each_subnet_size = fixnet.size / int(num_networks)
if each_subnet_size > FLAGS.network_size:
@@ -741,6 +741,9 @@ class NetworkCommands(object):
if not dns1 and FLAGS.flat_network_dns:
dns1 = FLAGS.flat_network_dns
+ if not network_size:
+ network_size = FLAGS.network_size
+
# create the network
net_manager = utils.import_object(FLAGS.network_manager)
net_manager.create_networks(context.get_admin_context(),
@@ -834,11 +837,13 @@ class VmCommands(object):
instance['availability_zone'],
instance['launch_index'])
- @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
-    @args('--dest', dest='dest', metavar='<Destination>',
-          help='destination node')
- def live_migration(self, ec2_id, dest):
- """Migrates a running instance to a new machine."""
+ def _migration(self, ec2_id, dest, block_migration=False):
+ """Migrates a running instance to a new machine.
+ :param ec2_id: instance id which comes from euca-describe-instance.
+ :param dest: destination host name.
+ :param block_migration: if True, do block_migration.
+
+ """
ctxt = context.get_admin_context()
instance_id = ec2utils.ec2_id_to_id(ec2_id)
@@ -859,11 +864,28 @@ class VmCommands(object):
{"method": "live_migration",
"args": {"instance_id": instance_id,
"dest": dest,
- "topic": FLAGS.compute_topic}})
+ "topic": FLAGS.compute_topic,
+ "block_migration": block_migration}})
print _('Migration of %s initiated.'
            ' Check its progress using euca-describe-instances.') % ec2_id
+ @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
+    @args('--dest', dest='dest', metavar='<Destination>',
+          help='destination node')
+ def live_migration(self, ec2_id, dest):
+ """Migrates a running instance to a new machine."""
+
+ self._migration(ec2_id, dest)
+
+ @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
+    @args('--dest', dest='dest', metavar='<Destination>',
+          help='destination node')
+ def block_migration(self, ec2_id, dest):
+ """Migrates a running instance to a new machine with storage data."""
+
+ self._migration(ec2_id, dest, True)
+
class ServiceCommands(object):
"""Enable and disable running services"""
@@ -882,6 +904,14 @@ class ServiceCommands(object):
services = [s for s in services if s['host'] == host]
if service:
services = [s for s in services if s['binary'] == service]
+ print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
+ print print_format % (
+ _('Binary'),
+ _('Host'),
+ _('Zone'),
+ _('Status'),
+ _('State'),
+ _('Updated_At'))
for svc in services:
delta = now - (svc['updated_at'] or svc['created_at'])
alive = (delta.seconds <= 15)
@@ -889,9 +919,9 @@ class ServiceCommands(object):
active = 'enabled'
if svc['disabled']:
active = 'disabled'
- print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
- active, art,
- svc['updated_at'])
+ print print_format % (svc['binary'], svc['host'],
+ svc['availability_zone'], active, art,
+ svc['updated_at'])
@args('--host', dest='host', metavar='<host>', help='Host')
@args('--service', dest='service', metavar='<service>',
@@ -937,9 +967,19 @@ class ServiceCommands(object):
mem_u = result['resource']['memory_mb_used']
hdd_u = result['resource']['local_gb_used']
+ cpu_sum = 0
+ mem_sum = 0
+ hdd_sum = 0
print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
- print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
+ print '%s(used_now)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
+ for p_id, val in result['usage'].items():
+ cpu_sum += val['vcpus']
+ mem_sum += val['memory_mb']
+ hdd_sum += val['local_gb']
+ print '%s(used_max)\t\t\t%s\t%s\t%s' % (host, cpu_sum,
+ mem_sum, hdd_sum)
+
for p_id, val in result['usage'].items():
print '%s\t\t%s\t\t%s\t%s\t%s' % (host,
p_id,
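
Editor's note: the resource-describing output above now distinguishes
used_now (what the hypervisor reports) from used_max (the per-project totals
recorded in the DB). The used_max figures are plain sums over
result['usage'], e.g. (data illustrative):

    # Sketch of the used_max aggregation (data illustrative).
    usage = {'proj-a': {'vcpus': 4, 'memory_mb': 8192, 'local_gb': 40},
             'proj-b': {'vcpus': 2, 'memory_mb': 2048, 'local_gb': 20}}
    cpu_sum = sum(val['vcpus'] for val in usage.values())      # 6
    mem_sum = sum(val['memory_mb'] for val in usage.values())  # 10240
    hdd_sum = sum(val['local_gb'] for val in usage.values())   # 60
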
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 2ae370f88..7b778ab0e 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -402,6 +402,10 @@ class Executor(wsgi.Application):
LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex),
context=context)
return self._error(req, context, type(ex).__name__, unicode(ex))
+ except exception.InvalidPortRange as ex:
+ LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex),
+ context=context)
+ return self._error(req, context, type(ex).__name__, unicode(ex))
except Exception as ex:
extra = {'environment': req.environ}
LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 87bba58c3..9aebf92e3 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -305,7 +305,7 @@ class CloudController(object):
'hostname': hostname,
'instance-action': 'none',
'instance-id': ec2_id,
- 'instance-type': instance_ref['instance_type'],
+ 'instance-type': instance_ref['instance_type']['name'],
'local-hostname': hostname,
'local-ipv4': address,
'placement': {'availability-zone': availability_zone},
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index dfdd62201..b2a675653 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -169,13 +169,20 @@ def get_id_from_href(href):
Returns: 123
"""
- if re.match(r'\d+$', str(href)):
+ LOG.debug(_("Attempting to treat %(href)s as an integer ID.") % locals())
+
+ try:
return int(href)
+ except ValueError:
+ pass
+
+ LOG.debug(_("Attempting to treat %(href)s as a URL.") % locals())
+
try:
return int(urlparse.urlsplit(href).path.split('/')[-1])
- except ValueError, e:
- LOG.debug(_("Error extracting id from href: %s") % href)
- raise ValueError(_('could not parse id from href'))
+ except ValueError as error:
+ LOG.debug(_("Failed to parse ID from %(href)s: %(error)s") % locals())
+ raise
def remove_version_from_href(href):
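
Editor's note: after this change get_id_from_href tries the href as a bare
integer first and only then parses it as a URL, letting any final ValueError
propagate instead of re-wrapping it. Expected behaviour, following the
docstring's own example:

    # Behaviour sketch of get_id_from_href:
    get_id_from_href('123')                            # -> 123
    get_id_from_href('http://host/v1.1/servers/123')   # -> 123
    get_id_from_href('http://host/v1.1/servers/abc')   # raises ValueError
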
diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
index 2aba1068a..44b35c385 100644
--- a/nova/api/openstack/contrib/floating_ips.py
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -43,8 +43,8 @@ def _translate_floating_ip_view(floating_ip):
def _translate_floating_ips_view(floating_ips):
- return {'floating_ips': [_translate_floating_ip_view(floating_ip)
- for floating_ip in floating_ips]}
+ return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip']
+ for ip in floating_ips]}
class FloatingIPController(object):
@@ -104,12 +104,9 @@ class FloatingIPController(object):
ip = self.network_api.get_floating_ip(context, id)
if 'fixed_ip' in ip:
- try:
- self.disassociate(req, id, '')
- except Exception as e:
- LOG.exception(_("Error disassociating fixed_ip %s"), e)
+ self.disassociate(req, id)
- self.network_api.release_floating_ip(context, address=ip)
+ self.network_api.release_floating_ip(context, address=ip['address'])
return {'released': {
"id": ip['id'],
@@ -134,7 +131,7 @@ class FloatingIPController(object):
"floating_ip": floating_ip,
"fixed_ip": fixed_ip}}
- def disassociate(self, req, id):
+ def disassociate(self, req, id, body=None):
""" POST /floating_ips/{id}/disassociate """
context = req.environ['nova.context']
floating_ip = self.network_api.get_floating_ip(context, id)
diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py
new file mode 100644
index 000000000..459b71dfd
--- /dev/null
+++ b/nova/api/openstack/contrib/quotas.py
@@ -0,0 +1,100 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova import db
+from nova import exception
+from nova import quota
+from nova.api.openstack import extensions
+
+
+class QuotaSetsController(object):
+
+ def _format_quota_set(self, project_id, quota_set):
+ """Convert the quota object to a result dict"""
+
+ return {'quota_set': {
+ 'id': str(project_id),
+ 'metadata_items': quota_set['metadata_items'],
+ 'injected_file_content_bytes':
+ quota_set['injected_file_content_bytes'],
+ 'volumes': quota_set['volumes'],
+ 'gigabytes': quota_set['gigabytes'],
+ 'ram': quota_set['ram'],
+ 'floating_ips': quota_set['floating_ips'],
+ 'instances': quota_set['instances'],
+ 'injected_files': quota_set['injected_files'],
+ 'cores': quota_set['cores'],
+ }}
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ try:
+ db.sqlalchemy.api.authorize_project_context(context, id)
+ return self._format_quota_set(id,
+ quota.get_project_quotas(context, id))
+ except exception.NotAuthorized:
+ return webob.Response(status_int=403)
+
+ def update(self, req, id, body):
+ context = req.environ['nova.context']
+ project_id = id
+ resources = ['metadata_items', 'injected_file_content_bytes',
+ 'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances',
+ 'injected_files', 'cores']
+ for key in body['quota_set'].keys():
+ if key in resources:
+ value = int(body['quota_set'][key])
+ try:
+ db.quota_update(context, project_id, key, value)
+ except exception.ProjectQuotaNotFound:
+ db.quota_create(context, project_id, key, value)
+ except exception.AdminRequired:
+ return webob.Response(status_int=403)
+ return {'quota_set': quota.get_project_quotas(context, project_id)}
+
+ def defaults(self, req, id):
+ return self._format_quota_set(id, quota._get_default_quotas())
+
+
+class Quotas(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "Quotas"
+
+ def get_alias(self):
+ return "os-quota-sets"
+
+ def get_description(self):
+ return "Quotas management support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/quotas-sets/api/v1.1"
+
+ def get_updated(self):
+ return "2011-08-08T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ res = extensions.ResourceExtension('os-quota-sets',
+ QuotaSetsController(),
+ member_actions={'defaults': 'GET'})
+ resources.append(res)
+
+ return resources
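
Editor's note: the new extension hangs QuotaSetsController off
/os-quota-sets with show, update, and a defaults member action. A hedged
sketch of the wire format (project id and numbers are illustrative; only
keys in the controller's resources list are applied):

    # Illustrative request/response shapes for os-quota-sets:
    #   GET .../os-quota-sets/myproject           -> show()
    #   GET .../os-quota-sets/myproject/defaults  -> defaults()
    #   PUT .../os-quota-sets/myproject           -> update()
    update_body = {'quota_set': {'instances': 50, 'cores': 100}}
    # update() int()s each recognized value and upserts it via
    # db.quota_update / db.quota_create, then echoes the project's quotas.
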
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 1425521a9..4e1da549e 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -122,6 +122,7 @@ class CreateInstanceHelper(object):
raise exc.HTTPBadRequest(explanation=msg)
zone_blob = server_dict.get('blob')
+ availability_zone = server_dict.get('availability_zone')
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
@@ -161,7 +162,8 @@ class CreateInstanceHelper(object):
zone_blob=zone_blob,
reservation_id=reservation_id,
min_count=min_count,
- max_count=max_count))
+ max_count=max_count,
+ availability_zone=availability_zone))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 736fdf6ce..335ecad86 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -163,7 +163,7 @@ class Controller(object):
@scheduler_api.redirect_handler
def update(self, req, id, body):
- """ Updates the server name or password """
+ """Update server name then pass on to version-specific controller"""
if len(req.body) == 0:
raise exc.HTTPUnprocessableEntity()
@@ -178,17 +178,15 @@ class Controller(object):
self.helper._validate_server_name(name)
update_dict['display_name'] = name.strip()
- self._parse_update(ctxt, id, body, update_dict)
-
try:
self.compute_api.update(ctxt, id, **update_dict)
except exception.NotFound:
raise exc.HTTPNotFound()
- return exc.HTTPNoContent()
+ return self._update(ctxt, req, id, body)
- def _parse_update(self, context, id, inst_dict, update_dict):
- pass
+ def _update(self, context, req, id, inst_dict):
+ return exc.HTTPNotImplemented()
@scheduler_api.redirect_handler
def action(self, req, id, body):
@@ -210,11 +208,15 @@ class Controller(object):
}
self.actions.update(admin_actions)
- for key in self.actions.keys():
- if key in body:
+ for key in body:
+ if key in self.actions:
return self.actions[key](body, req, id)
+ else:
+ msg = _("There is no such server action: %s") % (key,)
+ raise exc.HTTPBadRequest(explanation=msg)
- raise exc.HTTPNotImplemented()
+ msg = _("Invalid request body")
+ raise exc.HTTPBadRequest(explanation=msg)
def _action_create_backup(self, input_dict, req, instance_id):
"""Backup a server instance.
@@ -568,10 +570,11 @@ class ControllerV10(Controller):
def _limit_items(self, items, req):
return common.limited(items, req)
- def _parse_update(self, context, server_id, inst_dict, update_dict):
+ def _update(self, context, req, id, inst_dict):
if 'adminPass' in inst_dict['server']:
- self.compute_api.set_admin_password(context, server_id,
+ self.compute_api.set_admin_password(context, id,
inst_dict['server']['adminPass'])
+ return exc.HTTPNoContent()
def _action_resize(self, input_dict, req, id):
""" Resizes a given instance to the flavor size requested """
@@ -693,6 +696,10 @@ class ControllerV11(Controller):
LOG.info(msg)
raise exc.HTTPBadRequest(explanation=msg)
+ def _update(self, context, req, id, inst_dict):
+ instance = self.compute_api.routing_get(context, id)
+ return self._build_view(req, instance, is_detail=True)
+
def _action_resize(self, input_dict, req, id):
""" Resizes a given instance to the flavor size requested """
try:
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 8222f6766..edc328129 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -111,14 +111,14 @@ class ViewBuilderV10(ViewBuilder):
response['uuid'] = inst['uuid']
def _build_image(self, response, inst):
- if 'image_ref' in dict(inst):
+ if inst.get('image_ref', None):
image_ref = inst['image_ref']
if str(image_ref).startswith('http'):
raise exception.ListingImageRefsNotSupported()
response['imageId'] = int(image_ref)
def _build_flavor(self, response, inst):
- if 'instance_type' in dict(inst):
+ if inst.get('instance_type', None):
response['flavorId'] = inst['instance_type']['flavorid']
def _build_addresses(self, response, inst):
@@ -146,7 +146,7 @@ class ViewBuilderV11(ViewBuilder):
return response
def _build_image(self, response, inst):
- if 'image_ref' in dict(inst):
+ if inst.get("image_ref", None):
image_href = inst['image_ref']
image_id = str(common.get_id_from_href(image_href))
_bookmark = self.image_builder.generate_bookmark(image_id)
@@ -161,7 +161,7 @@ class ViewBuilderV11(ViewBuilder):
}
def _build_flavor(self, response, inst):
- if "instance_type" in dict(inst):
+ if inst.get("instance_type", None):
flavor_id = inst["instance_type"]['flavorid']
flavor_ref = self.flavor_builder.generate_href(flavor_id)
flavor_bookmark = self.flavor_builder.generate_bookmark(flavor_id)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 91a0c93b2..e909e9959 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -393,10 +393,6 @@ class API(base.Base):
updates['hostname'] = self.hostname_factory(instance)
instance = self.update(context, instance_id, **updates)
-
- for group_id in security_groups:
- self.trigger_security_group_members_refresh(elevated, group_id)
-
return instance
def _ask_scheduler_to_create_instance(self, context, base_options,
@@ -565,18 +561,20 @@ class API(base.Base):
{"method": "refresh_security_group_rules",
"args": {"security_group_id": security_group.id}})
- def trigger_security_group_members_refresh(self, context, group_id):
+ def trigger_security_group_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for whom this is
relevant.
"""
- # First, we get the security group rules that reference this group as
+ # First, we get the security group rules that reference these groups as
# the grantee..
- security_group_rules = \
+ security_group_rules = set()
+ for group_id in group_ids:
+ security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
- group_id)
+ group_id))
# ..then we distill the security groups to which they belong..
security_groups = set()
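
Editor's note: taking a list of group IDs lets
trigger_security_group_members_refresh fold the grantee rules of several
groups into one set, so a rule shared by two groups is only refreshed once.
The de-duplication is plain set semantics (rule ids illustrative):

    # Sketch of the set-based de-duplication (rule ids illustrative).
    rules_by_group = {1: ['rule-1', 'rule-2'], 2: ['rule-2', 'rule-3']}
    security_group_rules = set()
    for group_id in [1, 2]:
        security_group_rules.update(rules_by_group[group_id])
    # security_group_rules == set(['rule-1', 'rule-2', 'rule-3'])
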
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d38213083..66458fb36 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -170,7 +170,9 @@ class ComputeManager(manager.SchedulerDependentManager):
elif drv_state == power_state.RUNNING:
            # Hyper-V and VMWareAPI drivers will raise an exception
try:
- self.driver.ensure_filtering_rules_for_instance(instance)
+ net_info = self._get_instance_nw_info(context, instance)
+ self.driver.ensure_filtering_rules_for_instance(instance,
+ net_info)
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not '
'support firewall rules'))
@@ -321,10 +323,70 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
+ def _check_image_size():
+ """Ensure image is smaller than the maximum size allowed by the
+ instance_type.
+
+ The image stored in Glance is potentially compressed, so we use two
+ checks to ensure that the size isn't exceeded:
+
+            1) This one - checks compressed size, a quick check to
+ eliminate any images which are obviously too large
+
+ 2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
+ is a slower check since it requires uncompressing the entire
+ image, but is accurate because it reflects the image's
+ actual size.
+ """
+ # NOTE(jk0): image_ref is defined in the DB model, image_href is
+ # used by the image service. This should be refactored to be
+ # consistent.
+ image_href = instance['image_ref']
+ image_service, image_id = nova.image.get_image_service(image_href)
+ image_meta = image_service.show(context, image_id)
+
+ try:
+ size_bytes = image_meta['size']
+ except KeyError:
+ # Size is not a required field in the image service (yet), so
+ # we are unable to rely on it being there even though it's in
+ # glance.
+
+ # TODO(jk0): Should size be required in the image service?
+ return
+
+ instance_type_id = instance['instance_type_id']
+ instance_type = self.db.instance_type_get(context,
+ instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+
+ # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
+ # need to handle potential situations where local_gb is 0. This is
+ # the default for m1.tiny.
+ if allowed_size_gb == 0:
+ return
+
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+ LOG.debug(_("image_id=%(image_id)d, image_size_bytes="
+ "%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image '%(image_id)d' size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
+
context = context.elevated()
instance = self.db.instance_get(context, instance_id)
+
if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
+
+ _check_image_size()
+
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
updates = {}
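
Editor's note: the quick check in _check_image_size works entirely in bytes;
a worked example of the threshold arithmetic (numbers illustrative):

    # Worked example of the compressed-size check (numbers illustrative).
    allowed_size_gb = 20                              # instance_type['local_gb']
    allowed_size_bytes = allowed_size_gb * 1024 ** 3  # 21474836480
    size_bytes = 25 * 1024 ** 3                       # image_meta['size']
    if size_bytes > allowed_size_bytes:
        pass  # -> exception.ImageTooLarge, as in _check_image_size
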
@@ -1224,6 +1286,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_shared_storage_test_file(self, context, filename):
"""Confirms existence of the tmpfile under FLAGS.instances_path.
+        Returns False if the tmpfile cannot be confirmed.
:param context: security context
:param filename: confirm existence of FLAGS.instances_path/thisfile
@@ -1231,7 +1294,9 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
tmp_file = os.path.join(FLAGS.instances_path, filename)
if not os.path.exists(tmp_file):
- raise exception.FileNotFound(file_path=tmp_file)
+ return False
+ else:
+ return True
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def cleanup_shared_storage_test_file(self, context, filename):
@@ -1254,11 +1319,13 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.update_available_resource(context, self.host)
- def pre_live_migration(self, context, instance_id, time=None):
+ def pre_live_migration(self, context, instance_id, time=None,
+ block_migration=False, disk=None):
"""Preparations for live migration at dest host.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param block_migration: if true, prepare for block migration
"""
if not time:
@@ -1308,19 +1375,27 @@ class ComputeManager(manager.SchedulerDependentManager):
# This nwfilter is necessary on the destination host.
# In addition, this method is creating filtering rule
# onto destination host.
- self.driver.ensure_filtering_rules_for_instance(instance_ref)
+ self.driver.ensure_filtering_rules_for_instance(instance_ref,
+ network_info)
- def live_migration(self, context, instance_id, dest):
+ # Preparation for block migration
+ if block_migration:
+ self.driver.pre_block_migration(context,
+ instance_ref,
+ disk)
+
+ def live_migration(self, context, instance_id,
+ dest, block_migration=False):
"""Executing live migration.
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param dest: destination host
+ :param block_migration: if true, do block migration
"""
# Get instance for error handling.
instance_ref = self.db.instance_get(context, instance_id)
- i_name = instance_ref.name
try:
# Checking volume node is working correctly when any volumes
@@ -1331,16 +1406,25 @@ class ComputeManager(manager.SchedulerDependentManager):
{"method": "check_for_export",
"args": {'instance_id': instance_id}})
-        # Asking dest host to prepare live migration.
+ if block_migration:
+ disk = self.driver.get_instance_disk_info(context,
+ instance_ref)
+ else:
+ disk = None
+
rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, dest),
{"method": "pre_live_migration",
- "args": {'instance_id': instance_id}})
+ "args": {'instance_id': instance_id,
+ 'block_migration': block_migration,
+ 'disk': disk}})
except Exception:
+ i_name = instance_ref.name
msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
LOG.error(msg % locals())
- self.recover_live_migration(context, instance_ref)
+ self.rollback_live_migration(context, instance_ref,
+ dest, block_migration)
raise
# Executing live migration
@@ -1348,9 +1432,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# nothing must be recovered in this version.
self.driver.live_migration(context, instance_ref, dest,
self.post_live_migration,
- self.recover_live_migration)
+ self.rollback_live_migration,
+ block_migration)
- def post_live_migration(self, ctxt, instance_ref, dest):
+ def post_live_migration(self, ctxt, instance_ref,
+ dest, block_migration=False):
"""Post operations for live migration.
This method is called from live_migration
@@ -1359,6 +1445,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param ctxt: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param dest: destination host
+ :param block_migration: if true, do block migration
"""
@@ -1401,8 +1488,29 @@ class ComputeManager(manager.SchedulerDependentManager):
"%(i_name)s cannot inherit floating "
"ip.\n%(e)s") % (locals()))
- # Restore instance/volume state
- self.recover_live_migration(ctxt, instance_ref, dest)
+ # Define domain at destination host, without doing it,
+ # pause/suspend/terminate do not work.
+ rpc.call(ctxt,
+ self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
+ {"method": "post_live_migration_at_destination",
+ "args": {'instance_id': instance_ref.id,
+ 'block_migration': block_migration}})
+
+ # Restore instance state
+ self.db.instance_update(ctxt,
+ instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': dest})
+ # Restore volume state
+ for volume_ref in instance_ref['volumes']:
+ volume_id = volume_ref['id']
+ self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
+
+ # No instance booting at source host, but instance dir
+ # must be deleted for preparing next block migration
+ if block_migration:
+ self.driver.destroy(instance_ref, network_info)
LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
% locals())
@@ -1410,31 +1518,64 @@ class ComputeManager(manager.SchedulerDependentManager):
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."))
- def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None):
- """Recovers Instance/volume state from migrating -> running.
+ def post_live_migration_at_destination(self, context,
+ instance_id, block_migration=False):
+        """Post operations for live migration.
- :param ctxt: security context
+ :param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
- :param host: DB column value is updated by this hostname.
- If none, the host instance currently running is selected.
+ :param block_migration: block_migration
"""
- if not host:
- host = instance_ref['host']
+ instance_ref = self.db.instance_get(context, instance_id)
+        LOG.info(_('Post operation of migration started for %s.')
+ % instance_ref.name)
+ network_info = self._get_instance_nw_info(context, instance_ref)
+ self.driver.post_live_migration_at_destination(context,
+ instance_ref,
+ network_info,
+ block_migration)
- self.db.instance_update(ctxt,
+ def rollback_live_migration(self, context, instance_ref,
+ dest, block_migration):
+ """Recovers Instance/volume state from migrating -> running.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param dest: destination host. This method runs on the live
+            migration source host, and dest names the host the
+            instance was being migrated to.
+ """
+ host = instance_ref['host']
+ self.db.instance_update(context,
instance_ref['id'],
{'state_description': 'running',
'state': power_state.RUNNING,
'host': host})
- if dest:
- volume_api = volume.API()
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
- self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
- if dest:
- volume_api.remove_from_compute(ctxt, volume_id, dest)
+ self.db.volume_update(context, volume_id, {'status': 'in-use'})
+ volume.API().remove_from_compute(context, volume_id, dest)
+
+        # Block migration needs an empty image at the destination host
+        # before the migration starts, so if any failure occurs,
+        # those empty images have to be deleted.
+ if block_migration:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, dest),
+ {"method": "rollback_live_migration_at_destination",
+ "args": {'instance_id': instance_ref['id']}})
+
+ def rollback_live_migration_at_destination(self, context, instance_id):
+        """Clean up the image directory created by pre_live_migration.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ """
+ instances_ref = self.db.instance_get(context, instance_id)
+ network_info = self._get_instance_nw_info(context, instances_ref)
+ self.driver.destroy(instances_ref, network_info)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
diff --git a/nova/db/api.py b/nova/db/api.py
index 0f2218752..b9ea8757c 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -570,27 +570,6 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)
-def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
- """Get instances.vcpus by host and project."""
- return IMPL.instance_get_vcpu_sum_by_host_and_project(context,
- hostname,
- proj_id)
-
-
-def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
- """Get amount of memory by host and project."""
- return IMPL.instance_get_memory_sum_by_host_and_project(context,
- hostname,
- proj_id)
-
-
-def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
- """Get total amount of disk by host and project."""
- return IMPL.instance_get_disk_sum_by_host_and_project(context,
- hostname,
- proj_id)
-
-
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
return IMPL.instance_action_create(context, values)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 8119cdfb8..95ec3f715 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1139,7 +1139,10 @@ def instance_get_all(context):
session = get_session()
return session.query(models.Instance).\
options(joinedload_all('fixed_ips.floating_ips')).\
- options(joinedload('virtual_interfaces')).\
+ options(joinedload_all('virtual_interfaces.network')).\
+ options(joinedload_all(
+ 'virtual_interfaces.fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces.instance')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
@@ -1175,6 +1178,19 @@ def instance_get_all_by_filters(context, filters):
return True
return False
+ def _regexp_filter_by_metadata(instance, meta):
+ inst_metadata = [{node['key']: node['value']} \
+ for node in instance['metadata']]
+ if isinstance(meta, list):
+ for node in meta:
+ if node not in inst_metadata:
+ return False
+ elif isinstance(meta, dict):
+ for k, v in meta.iteritems():
+ if {k: v} not in inst_metadata:
+ return False
+ return True
+
def _regexp_filter_by_column(instance, filter_name, filter_re):
try:
v = getattr(instance, filter_name)
@@ -1202,6 +1218,7 @@ def instance_get_all_by_filters(context, filters):
options(joinedload_all('virtual_interfaces.network')).\
options(joinedload_all(
'virtual_interfaces.fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces.instance')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
@@ -1232,7 +1249,9 @@ def instance_get_all_by_filters(context, filters):
query_prefix = _exact_match_filter(query_prefix, filter_name,
filters.pop(filter_name))
- instances = query_prefix.all()
+ instances = query_prefix.\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
if not instances:
return []
@@ -1248,6 +1267,9 @@ def instance_get_all_by_filters(context, filters):
filter_re = re.compile(str(filters[filter_name]))
if filter_func:
filter_l = lambda instance: filter_func(instance, filter_re)
+ elif filter_name == 'metadata':
+ filter_l = lambda instance: _regexp_filter_by_metadata(instance,
+ filters[filter_name])
else:
filter_l = lambda instance: _regexp_filter_by_column(instance,
filter_name, filter_re)
@@ -1479,45 +1501,6 @@ def instance_add_security_group(context, instance_id, security_group_id):
@require_context
-def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
- session = get_session()
- result = session.query(models.Instance).\
- filter_by(host=hostname).\
- filter_by(project_id=proj_id).\
- filter_by(deleted=False).\
- value(func.sum(models.Instance.vcpus))
- if not result:
- return 0
- return result
-
-
-@require_context
-def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
- session = get_session()
- result = session.query(models.Instance).\
- filter_by(host=hostname).\
- filter_by(project_id=proj_id).\
- filter_by(deleted=False).\
- value(func.sum(models.Instance.memory_mb))
- if not result:
- return 0
- return result
-
-
-@require_context
-def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
- session = get_session()
- result = session.query(models.Instance).\
- filter_by(host=hostname).\
- filter_by(project_id=proj_id).\
- filter_by(deleted=False).\
- value(func.sum(models.Instance.local_gb))
- if not result:
- return 0
- return result
-
-
-@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
@@ -1994,6 +1977,7 @@ def quota_get(context, project_id, resource, session=None):
@require_context
def quota_get_all_by_project(context, project_id):
+ authorize_project_context(context, project_id)
session = get_session()
result = {'project_id': project_id}
rows = session.query(models.Quota).\
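
Editor's note: _regexp_filter_by_metadata (added above) is applied in Python
rather than SQL; it accepts either a dict or a list of single-pair dicts,
and every requested pair must be present. Treating it as if it were a
standalone function (it is actually nested in instance_get_all_by_filters):

    # Behaviour sketch of the metadata filter (instance data illustrative).
    instance = {'metadata': [{'key': 'env', 'value': 'prod'},
                             {'key': 'tier', 'value': 'web'}]}
    # dict form: every key/value pair must match
    _regexp_filter_by_metadata(instance, {'env': 'prod'})             # True
    _regexp_filter_by_metadata(instance, {'env': 'prod', 'x': 'y'})   # False
    # list form: every element must be one of the instance's pairs
    _regexp_filter_by_metadata(instance, [{'tier': 'web'}])           # True
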
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 939fde199..f2a4680b0 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -127,14 +127,14 @@ class ComputeNode(BASE, NovaBase):
'ComputeNode.service_id == Service.id,'
'ComputeNode.deleted == False)')
- vcpus = Column(Integer, nullable=True)
- memory_mb = Column(Integer, nullable=True)
- local_gb = Column(Integer, nullable=True)
- vcpus_used = Column(Integer, nullable=True)
- memory_mb_used = Column(Integer, nullable=True)
- local_gb_used = Column(Integer, nullable=True)
- hypervisor_type = Column(Text, nullable=True)
- hypervisor_version = Column(Integer, nullable=True)
+ vcpus = Column(Integer)
+ memory_mb = Column(Integer)
+ local_gb = Column(Integer)
+ vcpus_used = Column(Integer)
+ memory_mb_used = Column(Integer)
+ local_gb_used = Column(Integer)
+ hypervisor_type = Column(Text)
+ hypervisor_version = Column(Integer)
# Note(masumotok): Expected Strings example:
#
@@ -479,6 +479,11 @@ class SecurityGroupIngressRule(BASE, NovaBase):
# Note: This is not the parent SecurityGroup. It's SecurityGroup we're
# granting access for.
group_id = Column(Integer, ForeignKey('security_groups.id'))
+ grantee_group = relationship("SecurityGroup",
+ foreign_keys=group_id,
+ primaryjoin='and_('
+ 'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
+ 'SecurityGroupIngressRule.deleted == False)')
class ProviderFirewallRule(BASE, NovaBase):
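
Editor's note: the new grantee_group relationship gives rule objects a
direct hop to the group they grant access to, which is what the
members-refresh path needs. A sketch of the navigation (assumes an active
ORM session; the query itself is illustrative):

    # Sketch: navigating the new relationship (assumes an active session).
    # rule = session.query(SecurityGroupIngressRule).first()
    # if rule.group_id is not None:
    #     granted_group = rule.grantee_group  # SecurityGroup granted access
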
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 4a9a28f43..07f281938 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -19,37 +19,79 @@
Session Handling for SQLAlchemy backend
"""
-from sqlalchemy import create_engine
-from sqlalchemy import pool
-from sqlalchemy.orm import sessionmaker
+import eventlet.patcher
+eventlet.patcher.monkey_patch()
-from nova import exception
-from nova import flags
+import eventlet.db_pool
+import sqlalchemy.orm
+import sqlalchemy.pool
+
+import nova.exception
+import nova.flags
+import nova.log
+
+
+FLAGS = nova.flags.FLAGS
+LOG = nova.log.getLogger("nova.db.sqlalchemy")
+
+
+try:
+ import MySQLdb
+except ImportError:
+ MySQLdb = None
-FLAGS = flags.FLAGS
_ENGINE = None
_MAKER = None
def get_session(autocommit=True, expire_on_commit=False):
- """Helper method to grab session"""
- global _ENGINE
- global _MAKER
- if not _MAKER:
- if not _ENGINE:
- kwargs = {'pool_recycle': FLAGS.sql_idle_timeout,
- 'echo': False}
-
- if FLAGS.sql_connection.startswith('sqlite'):
- kwargs['poolclass'] = pool.NullPool
-
- _ENGINE = create_engine(FLAGS.sql_connection,
- **kwargs)
- _MAKER = (sessionmaker(bind=_ENGINE,
- autocommit=autocommit,
- expire_on_commit=expire_on_commit))
+ """Return a SQLAlchemy session."""
+ global _ENGINE, _MAKER
+
+ if _MAKER is None or _ENGINE is None:
+ _ENGINE = get_engine()
+ _MAKER = get_maker(_ENGINE, autocommit, expire_on_commit)
+
session = _MAKER()
- session.query = exception.wrap_db_error(session.query)
- session.flush = exception.wrap_db_error(session.flush)
+ session.query = nova.exception.wrap_db_error(session.query)
+ session.flush = nova.exception.wrap_db_error(session.flush)
return session
+
+
+def get_engine():
+ """Return a SQLAlchemy engine."""
+ connection_dict = sqlalchemy.engine.url.make_url(FLAGS.sql_connection)
+
+ engine_args = {
+ "pool_recycle": FLAGS.sql_idle_timeout,
+ "echo": False,
+ }
+
+ if "sqlite" in connection_dict.drivername:
+ engine_args["poolclass"] = sqlalchemy.pool.NullPool
+
+ elif MySQLdb and "mysql" in connection_dict.drivername:
+ LOG.info(_("Using mysql/eventlet db_pool."))
+ pool_args = {
+ "db": connection_dict.database,
+ "passwd": connection_dict.password,
+ "host": connection_dict.host,
+ "user": connection_dict.username,
+ "min_size": FLAGS.sql_min_pool_size,
+ "max_size": FLAGS.sql_max_pool_size,
+ "max_idle": FLAGS.sql_idle_timeout,
+ }
+ creator = eventlet.db_pool.ConnectionPool(MySQLdb, **pool_args)
+ engine_args["pool_size"] = FLAGS.sql_max_pool_size
+ engine_args["pool_timeout"] = FLAGS.sql_pool_timeout
+ engine_args["creator"] = creator.create
+
+ return sqlalchemy.create_engine(FLAGS.sql_connection, **engine_args)
+
+
+def get_maker(engine, autocommit=True, expire_on_commit=False):
+ """Return a SQLAlchemy sessionmaker using the given engine."""
+ return sqlalchemy.orm.sessionmaker(bind=engine,
+ autocommit=autocommit,
+ expire_on_commit=expire_on_commit)
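
Editor's note: callers are untouched by the session rewrite; get_session()
still hands back a wrapped session. The difference is underneath: sqlite
keeps a NullPool, while MySQL (when MySQLdb imports) builds the engine
around an eventlet connection pool sized by the new sql_* flags:

    # Call sites are unchanged (sketch):
    session = get_session()
    # instances = session.query(models.Instance).all()
    # Under MySQL, connections come from
    # eventlet.db_pool.ConnectionPool(MySQLdb,
    #                                 min_size=FLAGS.sql_min_pool_size,
    #                                 max_size=FLAGS.sql_max_pool_size, ...)
    # handed to SQLAlchemy via the engine's 'creator' argument.
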
diff --git a/nova/exception.py b/nova/exception.py
index a5a25086e..b09d50797 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -273,6 +273,11 @@ class DestinationHypervisorTooOld(Invalid):
"has been provided.")
+class DestinationDiskExists(Invalid):
+    message = _("The supplied disk path (%(path)s) already exists; "
+ "it is expected not to exist.")
+
+
class InvalidDevicePath(Invalid):
message = _("The supplied device path (%(path)s) is invalid.")
@@ -699,6 +704,10 @@ class InstanceExists(Duplicate):
message = _("Instance %(name)s already exists.")
+class InvalidSharedStorage(NovaException):
+ message = _("%(path)s is on shared storage: %(reason)s")
+
+
class MigrationError(NovaException):
message = _("Migration error") + ": %(reason)s"
@@ -721,3 +730,7 @@ class CannotResizeToSameSize(NovaException):
class CannotResizeToSmallerSize(NovaException):
message = _("Resizing to a smaller size is not supported.")
+
+
+class ImageTooLarge(NovaException):
+ message = _("Image is larger than instance type allows")
diff --git a/nova/flags.py b/nova/flags.py
index 7916501a4..48d5e8168 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -305,6 +305,7 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
+DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues')
DEFINE_list('enabled_apis', ['ec2', 'osapi'],
'list of APIs to enable by default')
DEFINE_string('ec2_host', '$my_ip', 'ip of api server')
@@ -345,6 +346,12 @@ DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
DEFINE_integer('logfile_mode', 0644, 'Default file mode of the logs.')
DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite')
+DEFINE_integer('sql_pool_timeout', 30,
+ 'seconds to wait for connection from pool before erroring')
+DEFINE_integer('sql_min_pool_size', 10,
+ 'minimum number of SQL connections to pool')
+DEFINE_integer('sql_max_pool_size', 10,
+ 'maximum number of SQL connections to pool')
DEFINE_string('sql_connection',
'sqlite:///$state_path/$sqlite_db',
'connection string for sql database')
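
Editor's note: in a flag file these new options are set like any other nova
flag; the lines below simply restate the defaults above (not new
recommended values):

    # Flag-file entries restating the new defaults:
    #   --rabbit_durable_queues=false
    #   --sql_pool_timeout=30
    #   --sql_min_pool_size=10
    #   --sql_max_pool_size=10
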
diff --git a/nova/image/s3.py b/nova/image/s3.py
index ccbfa89cd..abf01a942 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -193,6 +193,8 @@ class S3ImageService(service.BaseImageService):
def delayed_create():
"""This handles the fetching and decrypting of the part files."""
+ log_vars = {'image_location': image_location,
+ 'image_path': image_path}
metadata['properties']['image_state'] = 'downloading'
self.service.update(context, image_id, metadata)
@@ -213,11 +215,11 @@ class S3ImageService(service.BaseImageService):
shutil.copyfileobj(part, combined)
except Exception:
- LOG.error(_("Failed to download %(image_location)s "
- "to %(image_path)s"), locals())
+ LOG.exception(_("Failed to download %(image_location)s "
+ "to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_download'
self.service.update(context, image_id, metadata)
- raise
+ return
metadata['properties']['image_state'] = 'decrypting'
self.service.update(context, image_id, metadata)
@@ -237,11 +239,11 @@ class S3ImageService(service.BaseImageService):
encrypted_iv, cloud_pk,
dec_filename)
except Exception:
- LOG.error(_("Failed to decrypt %(image_location)s "
- "to %(image_path)s"), locals())
+ LOG.exception(_("Failed to decrypt %(image_location)s "
+ "to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_decrypt'
self.service.update(context, image_id, metadata)
- raise
+ return
metadata['properties']['image_state'] = 'untarring'
self.service.update(context, image_id, metadata)
@@ -249,11 +251,11 @@ class S3ImageService(service.BaseImageService):
try:
unz_filename = self._untarzip_image(image_path, dec_filename)
except Exception:
- LOG.error(_("Failed to untar %(image_location)s "
- "to %(image_path)s"), locals())
+ LOG.exception(_("Failed to untar %(image_location)s "
+ "to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_untar'
self.service.update(context, image_id, metadata)
- raise
+ return
metadata['properties']['image_state'] = 'uploading'
self.service.update(context, image_id, metadata)
@@ -262,11 +264,11 @@ class S3ImageService(service.BaseImageService):
self.service.update(context, image_id,
metadata, image_file)
except Exception:
- LOG.error(_("Failed to upload %(image_location)s "
- "to %(image_path)s"), locals())
+ LOG.exception(_("Failed to upload %(image_location)s "
+ "to %(image_path)s"), log_vars)
metadata['properties']['image_state'] = 'failed_upload'
self.service.update(context, image_id, metadata)
- raise
+ return
metadata['properties']['image_state'] = 'available'
metadata['status'] = 'active'
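
Editor's note: the switch from raise to return in delayed_create matters
because the function runs asynchronously (the surrounding code, not shown in
this hunk, spawns it): there is no caller to catch a re-raised exception, so
the image_state property becomes the error channel. The resulting states:

    # Image states recorded via self.service.update() (from the hunks above):
    #   downloading -> decrypting -> untarring -> uploading -> available
    # with terminal failure states failed_download / failed_decrypt /
    # failed_untar / failed_upload recorded in place of a re-raise.
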
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 4e1e1f85a..57c1d0c28 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -63,6 +63,11 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
'dmz range that should be accepted')
flags.DEFINE_string('dnsmasq_config_file', "",
'Override the default dnsmasq settings with this file')
+flags.DEFINE_string('linuxnet_interface_driver',
+ 'nova.network.linux_net.LinuxBridgeInterfaceDriver',
+ 'Driver used to create ethernet devices.')
+flags.DEFINE_string('linuxnet_ovs_integration_bridge',
+ 'br-int', 'Name of Open vSwitch bridge used with linuxnet')
binary_name = os.path.basename(inspect.stack()[-1][1])
@@ -414,7 +419,7 @@ def ensure_metadata_ip():
run_as_root=True, check_exit_code=False)
-def ensure_vlan_forward(public_ip, port, private_ip):
+def ensure_vpn_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
@@ -452,111 +457,34 @@ def floating_forward_rules(floating_ip, fixed_ip):
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
-def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
- """Create a vlan and bridge unless they already exist."""
- interface = ensure_vlan(vlan_num, bridge_interface)
- ensure_bridge(bridge, interface, net_attrs)
- return interface
+def initialize_gateway_device(dev, network_ref):
+ if not network_ref:
+ return
-
-@utils.synchronized('ensure_vlan', external=True)
-def ensure_vlan(vlan_num, bridge_interface):
- """Create a vlan unless it already exists."""
- interface = 'vlan%s' % vlan_num
- if not _device_exists(interface):
-        LOG.debug(_('Starting VLAN interface %s'), interface)
- _execute('vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD',
- run_as_root=True)
- _execute('vconfig', 'add', bridge_interface, vlan_num,
- run_as_root=True)
- _execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
- return interface
-
-
-@utils.synchronized('ensure_bridge', external=True)
-def ensure_bridge(bridge, interface, net_attrs=None):
- """Create a bridge unless it already exists.
-
- :param interface: the interface to create the bridge on.
- :param net_attrs: dictionary with attributes used to create the bridge.
-
- If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
- using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
- the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
-
- The code will attempt to move any ips that already exist on the interface
- onto the bridge and reset the default gateway if necessary.
-
- """
- if not _device_exists(bridge):
- LOG.debug(_('Starting Bridge interface for %s'), interface)
- _execute('brctl', 'addbr', bridge, run_as_root=True)
- _execute('brctl', 'setfd', bridge, 0, run_as_root=True)
- _execute('brctl', 'stp', bridge, 'off', run_as_root=True)
- _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
- if net_attrs:
- # NOTE(vish): The ip for dnsmasq has to be the first address on the
-        #             bridge for it to respond to requests properly
- suffix = net_attrs['cidr'].rpartition('/')[2]
- out, err = _execute('ip', 'addr', 'add',
+ # NOTE(vish): The ip for dnsmasq has to be the first address on the
+    #             bridge for it to respond to requests properly
+ suffix = network_ref['cidr'].rpartition('/')[2]
+ out, err = _execute('ip', 'addr', 'add',
'%s/%s' %
- (net_attrs['dhcp_server'], suffix),
+ (network_ref['dhcp_server'], suffix),
'brd',
- net_attrs['broadcast'],
+ network_ref['broadcast'],
'dev',
- bridge,
+ dev,
run_as_root=True,
check_exit_code=False)
- if err and err != 'RTNETLINK answers: File exists\n':
- raise exception.Error('Failed to add ip: %s' % err)
- if(FLAGS.use_ipv6):
- _execute('ip', '-f', 'inet6', 'addr',
- 'change', net_attrs['cidr_v6'],
- 'dev', bridge, run_as_root=True)
- # NOTE(vish): If the public interface is the same as the
- # bridge, then the bridge has to be in promiscuous
- # to forward packets properly.
- if(FLAGS.public_interface == bridge):
- _execute('ip', 'link', 'set',
- 'dev', bridge, 'promisc', 'on', run_as_root=True)
- if interface:
- # NOTE(vish): This will break if there is already an ip on the
- # interface, so we move any ips to the bridge
- gateway = None
- out, err = _execute('route', '-n', run_as_root=True)
- for line in out.split('\n'):
- fields = line.split()
- if fields and fields[0] == '0.0.0.0' and fields[-1] == interface:
- gateway = fields[1]
- _execute('route', 'del', 'default', 'gw', gateway,
- 'dev', interface,
- check_exit_code=False, run_as_root=True)
- out, err = _execute('ip', 'addr', 'show', 'dev', interface,
- 'scope', 'global', run_as_root=True)
- for line in out.split('\n'):
- fields = line.split()
- if fields and fields[0] == 'inet':
- params = fields[1:-1]
- _execute(*_ip_bridge_cmd('del', params, fields[-1]),
- run_as_root=True)
- _execute(*_ip_bridge_cmd('add', params, bridge),
- run_as_root=True)
- if gateway:
- _execute('route', 'add', 'default', 'gw', gateway,
- run_as_root=True)
- out, err = _execute('brctl', 'addif', bridge, interface,
- check_exit_code=False, run_as_root=True)
-
- if (err and err != "device %s is already a member of a bridge; can't "
- "enslave it to bridge %s.\n" % (interface, bridge)):
- raise exception.Error('Failed to add interface: %s' % err)
-
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
- '--in-interface %s -j ACCEPT' % \
- bridge)
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
- '--out-interface %s -j ACCEPT' % \
- bridge)
+ if err and err != 'RTNETLINK answers: File exists\n':
+ raise exception.Error('Failed to add ip: %s' % err)
+ if(FLAGS.use_ipv6):
+ _execute('ip', '-f', 'inet6', 'addr',
+ 'change', network_ref['cidr_v6'],
+ 'dev', dev, run_as_root=True)
+ # NOTE(vish): If the public interface is the same as the
+ # bridge, then the bridge has to be in promiscuous
+ # to forward packets properly.
+ if(FLAGS.public_interface == dev):
+ _execute('ip', 'link', 'set',
+ 'dev', dev, 'promisc', 'on', run_as_root=True)
def get_dhcp_leases(context, network_ref):
@@ -587,21 +515,21 @@ def get_dhcp_hosts(context, network_ref):
# configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
-def update_dhcp(context, network_ref):
+def update_dhcp(context, dev, network_ref):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
"""
- conffile = _dhcp_file(network_ref['bridge'], 'conf')
+ conffile = _dhcp_file(dev, 'conf')
with open(conffile, 'w') as f:
f.write(get_dhcp_hosts(context, network_ref))
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
- pid = _dnsmasq_pid_for(network_ref['bridge'])
+ pid = _dnsmasq_pid_for(dev)
# if dnsmasq is already running, then tell it to reload
if pid:
@@ -617,19 +545,19 @@ def update_dhcp(context, network_ref):
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile,
- 'DNSMASQ_INTERFACE=%s' % network_ref['bridge'],
+ 'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
'--bind-interfaces',
- '--interface=%s' % network_ref['bridge'],
+ '--interface=%s' % dev,
'--conf-file=%s' % FLAGS.dnsmasq_config_file,
'--domain=%s' % FLAGS.dhcp_domain,
- '--pid-file=%s' % _dhcp_file(network_ref['bridge'], 'pid'),
+ '--pid-file=%s' % _dhcp_file(dev, 'pid'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=%s,static,120s' % network_ref['dhcp_start'],
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
- '--dhcp-hostsfile=%s' % _dhcp_file(network_ref['bridge'], 'conf'),
+ '--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
if FLAGS.dns_server:
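# NOTE: for illustration, with dev='br100', a 10.0.0.0/24 network whose
# dhcp_server is 10.0.0.1 and dhcp_start is 10.0.0.2, and typical flag
# values (the paths and defaults here are assumptions, not part of this
# change), the assembled command looks roughly like:
#
#   FLAGFILE=/etc/nova/nova-dhcpbridge.conf NETWORK_ID=1 dnsmasq \
#       --strict-order --bind-interfaces --interface=br100 \
#       --conf-file= --domain=novalocal \
#       --pid-file=/var/lib/nova/networks/nova-br100.pid \
#       --listen-address=10.0.0.1 --except-interface=lo \
#       --dhcp-range=10.0.0.2,static,120s --dhcp-lease-max=256 \
#       --dhcp-hostsfile=/var/lib/nova/networks/nova-br100.conf \
#       --dhcp-script=/usr/bin/nova-dhcpbridge --leasefile-ro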
@@ -639,8 +567,8 @@ def update_dhcp(context, network_ref):
@utils.synchronized('radvd_start')
-def update_ra(context, network_ref):
- conffile = _ra_file(network_ref['bridge'], 'conf')
+def update_ra(context, dev, network_ref):
+ conffile = _ra_file(dev, 'conf')
with open(conffile, 'w') as f:
conf_str = """
interface %s
@@ -654,13 +582,13 @@ interface %s
AdvAutonomous on;
};
};
-""" % (network_ref['bridge'], network_ref['cidr_v6'])
+""" % (dev, network_ref['cidr_v6'])
f.write(conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
- pid = _ra_pid_for(network_ref['bridge'])
+ pid = _ra_pid_for(dev)
# if radvd is already running, then tell it to reload
if pid:
@@ -675,8 +603,8 @@ interface %s
LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
cmd = ['radvd',
- '-C', '%s' % _ra_file(network_ref['bridge'], 'conf'),
- '-p', '%s' % _ra_file(network_ref['bridge'], 'pid')]
+ '-C', '%s' % _ra_file(dev, 'conf'),
+ '-p', '%s' % _ra_file(dev, 'pid')]
_execute(*cmd, run_as_root=True)
@@ -722,9 +650,9 @@ def _device_exists(device):
return not err
-def _stop_dnsmasq(network):
+def _stop_dnsmasq(dev):
"""Stops the dnsmasq instance for a given network."""
- pid = _dnsmasq_pid_for(network)
+ pid = _dnsmasq_pid_for(dev)
if pid:
try:
@@ -733,49 +661,49 @@ def _stop_dnsmasq(network):
LOG.debug(_('Killing dnsmasq threw %s'), exc)
-def _dhcp_file(bridge, kind):
- """Return path to a pid, leases or conf file for a bridge."""
+def _dhcp_file(dev, kind):
+ """Return path to a pid, leases or conf file for a bridge/device."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
- bridge,
+ dev,
kind))
-def _ra_file(bridge, kind):
- """Return path to a pid or conf file for a bridge."""
+def _ra_file(dev, kind):
+ """Return path to a pid or conf file for a bridge/device."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
- bridge,
+ dev,
kind))
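# NOTE: a quick illustration of the naming scheme above, assuming
# FLAGS.networks_path is '/var/lib/nova/networks' (deployment-dependent):
#   _dhcp_file('br100', 'pid')  -> /var/lib/nova/networks/nova-br100.pid
#   _ra_file('br100', 'conf')   -> /var/lib/nova/networks/nova-ra-br100.conf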
-def _dnsmasq_pid_for(bridge):
- """Returns the pid for prior dnsmasq instance for a bridge.
+def _dnsmasq_pid_for(dev):
+ """Returns the pid for prior dnsmasq instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
- pid_file = _dhcp_file(bridge, 'pid')
+ pid_file = _dhcp_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
-def _ra_pid_for(bridge):
- """Returns the pid for prior radvd instance for a bridge.
+def _ra_pid_for(dev):
+ """Returns the pid for prior radvd instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
- pid_file = _ra_file(bridge, 'pid')
+ pid_file = _ra_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
@@ -790,4 +718,180 @@ def _ip_bridge_cmd(action, params, device):
return cmd
+# Similar to compute virt layers, the Linux network node
+# code uses a flexible driver model to support different ways
+# of creating ethernet interfaces and attaching them to the network.
+# In the case of a network host, these interfaces
+# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
+
+
+def plug(network, mac_address):
+ return interface_driver.plug(network, mac_address)
+
+
+def unplug(network):
+ return interface_driver.unplug(network)
+
+
+class LinuxNetInterfaceDriver(object):
+ """Abstract class that defines generic network host API"""
+ """ for for all Linux interface drivers."""
+
+ def plug(self, network, mac_address):
+ """Create Linux device, return device name"""
+ raise NotImplementedError()
+
+ def unplug(self, network):
+ """Destory Linux device, return device name"""
+ raise NotImplementedError()
+
+
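+# NOTE: any driver only has to honor the two-method contract above; a
+# minimal sketch of a hypothetical third-party driver (not part of this
+# change) that reuses an existing host NIC as the gateway device:
+#
+#   class ExistingDeviceInterfaceDriver(LinuxNetInterfaceDriver):
+#       def plug(self, network, mac_address):
+#           return network['bridge_interface']
+#
+#       def unplug(self, network):
+#           return network['bridge_interface']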
+# plugs interfaces using Linux Bridge
+class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
+
+ def plug(self, network, mac_address):
+ if network.get('vlan', None) is not None:
+ LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
+ network['vlan'],
+ network['bridge'],
+ network['bridge_interface'],
+ network,
+ mac_address)
+ else:
+ LinuxBridgeInterfaceDriver.ensure_bridge(
+ network['bridge'],
+ network['bridge_interface'],
+ network)
+
+ return network['bridge']
+
+ def unplug(self, network):
+ return network['bridge']
+
+ @classmethod
+ def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
+ net_attrs=None, mac_address=None):
+ """Create a vlan and bridge unless they already exist."""
+ interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
+ bridge_interface, mac_address)
+ LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
+ return interface
+
+ @classmethod
+ @utils.synchronized('ensure_vlan', external=True)
+ def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
+ """Create a vlan unless it already exists."""
+ interface = 'vlan%s' % vlan_num
+ if not _device_exists(interface):
+ LOG.debug(_('Starting VLAN interface %s'), interface)
+ _execute('vconfig', 'set_name_type',
+ 'VLAN_PLUS_VID_NO_PAD', run_as_root=True)
+ _execute('vconfig', 'add', bridge_interface,
+ vlan_num, run_as_root=True)
+ # (danwent) the bridge will inherit this address, so we want to
+ # make sure it is the value set from the NetworkManager
+ if mac_address:
+ _execute('ip', 'link', 'set', interface, "address",
+ mac_address, run_as_root=True)
+ _execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
+ return interface
+
+ @classmethod
+ @utils.synchronized('ensure_bridge', external=True)
+ def ensure_bridge(_self, bridge, interface, net_attrs=None):
+ """Create a bridge unless it already exists.
+
+ :param interface: the interface to create the bridge on.
+ :param net_attrs: dictionary with attributes used to create bridge.
+
+ If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
+ using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
+ the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
+
+ The code will attempt to move any ips that already exist on the
+ interface onto the bridge and reset the default gateway if necessary.
+
+ """
+ if not _device_exists(bridge):
+ LOG.debug(_('Starting Bridge interface for %s'), interface)
+ _execute('brctl', 'addbr', bridge, run_as_root=True)
+ _execute('brctl', 'setfd', bridge, 0, run_as_root=True)
+ # _execute('brctl setageing %s 10' % bridge, run_as_root=True)
+ _execute('brctl', 'stp', bridge, 'off', run_as_root=True)
+ # (danwent) bridge device MAC address can't be set directly.
+ # instead it inherits the MAC address of the first device on the
+ # bridge, which will either be the vlan interface, or a
+ # physical NIC.
+ _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
+
+ if interface:
+ out, err = _execute('brctl', 'addif', bridge, interface,
+ check_exit_code=False, run_as_root=True)
+
+ # NOTE(vish): This will break if there is already an ip on the
+ # interface, so we move any ips to the bridge
+ gateway = None
+ out, err = _execute('route', '-n', run_as_root=True)
+ for line in out.split('\n'):
+ fields = line.split()
+ if fields and fields[0] == '0.0.0.0' and \
+ fields[-1] == interface:
+ gateway = fields[1]
+ _execute('route', 'del', 'default', 'gw', gateway,
+ 'dev', interface, check_exit_code=False,
+ run_as_root=True)
+ out, err = _execute('ip', 'addr', 'show', 'dev', interface,
+ 'scope', 'global', run_as_root=True)
+ for line in out.split('\n'):
+ fields = line.split()
+ if fields and fields[0] == 'inet':
+ params = fields[1:-1]
+ _execute(*_ip_bridge_cmd('del', params, fields[-1]),
+ run_as_root=True)
+ _execute(*_ip_bridge_cmd('add', params, bridge),
+ run_as_root=True)
+ if gateway:
+ _execute('route', 'add', 'default', 'gw', gateway,
+ run_as_root=True)
+
+ if (err and err != "device %s is already a member of a bridge;"
+ "can't enslave it to bridge %s.\n" % (interface, bridge)):
+ raise exception.Error('Failed to add interface: %s' % err)
+
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--in-interface %s -j ACCEPT' % \
+ bridge)
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--out-interface %s -j ACCEPT' % \
+ bridge)
+
+
+# plugs interfaces using Open vSwitch
+class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
+
+ def plug(self, network, mac_address):
+ dev = "gw-" + str(network['id'])
+ if not _device_exists(dev):
+ bridge = FLAGS.linuxnet_ovs_integration_bridge
+ _execute('ovs-vsctl',
+ '--', '--may-exist', 'add-port', bridge, dev,
+ '--', 'set', 'Interface', dev, "type=internal",
+ '--', 'set', 'Interface', dev,
+ "external-ids:iface-id=nova-%s" % dev,
+ '--', 'set', 'Interface', dev,
+ "external-ids:iface-status=active",
+ '--', 'set', 'Interface', dev,
+ "external-ids:attached-mac=%s" % mac_address,
+ run_as_root=True)
+ _execute('ip', 'link', 'set', dev, "address", mac_address,
+ run_as_root=True)
+ _execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
+
+ return dev
+
+ def unplug(self, network):
+ dev = "gw-" + str(network['id'])
+ return dev
+
iptables_manager = IptablesManager()
+interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver)
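+# NOTE: a sketch of driver selection via flags; the class paths assume both
+# drivers live in this module (nova.network.linux_net) and the bridge name
+# is illustrative:
+#
+#   --linuxnet_interface_driver=\
+#       nova.network.linux_net.LinuxBridgeInterfaceDriver
+#   # or, for Open vSwitch:
+#   --linuxnet_interface_driver=\
+#       nova.network.linux_net.LinuxOVSInterfaceDriver
+#   --linuxnet_ovs_integration_bridge=br-int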
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 8fc6a295f..08439b004 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,6 +45,7 @@ topologies. All of the network commands are issued to a subclass of
"""
import datetime
+import itertools
import math
import netaddr
import socket
@@ -61,6 +62,7 @@ from nova import quota
from nova import utils
from nova import rpc
from nova.network import api as network_api
+from nova.compute import api as compute_api
import random
@@ -313,6 +315,7 @@ class NetworkManager(manager.SchedulerDependentManager):
network_driver = FLAGS.network_driver
self.driver = utils.import_object(network_driver)
self.network_api = network_api.API()
+ self.compute_api = compute_api.API()
super(NetworkManager, self).__init__(service_name='network',
*args, **kwargs)
@@ -368,6 +371,15 @@ class NetworkManager(manager.SchedulerDependentManager):
self.host)
return host
+ def _do_trigger_security_group_members_refresh_for_instance(self,
+ instance_id):
+ admin_context = context.get_admin_context()
+ instance_ref = self.db.instance_get(admin_context, instance_id)
+ groups = instance_ref['security_groups']
+ group_ids = [group['id'] for group in groups]
+ self.compute_api.trigger_security_group_members_refresh(admin_context,
+ group_ids)
+
def _get_networks_for_instance(self, context, instance_id, project_id):
"""Determine & return which networks an instance should connect to."""
# TODO(tr3buchet) maybe this needs to be updated in the future if
@@ -526,7 +538,7 @@ class NetworkManager(manager.SchedulerDependentManager):
raise exception.VirtualInterfaceMacAddressException()
def generate_mac_address(self):
- """Generate a mac address for a vif on an instance."""
+ """Generate an Ethernet MAC address."""
mac = [0x02, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
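# NOTE: a hedged sketch of how the octets would typically be rendered (the
# method's tail is elided from this hunk):
#
#   mac = [0x02, 0x16, 0x3e, 0x5f, 0x7a, 0x42]    # illustrative draw
#   ':'.join('%02x' % octet for octet in mac)     # -> '02:16:3e:5f:7a:42'
#
# The 02:16:3e prefix keeps the locally-administered bit set, so generated
# addresses cannot collide with vendor-assigned NIC MACs.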
@@ -559,6 +571,8 @@ class NetworkManager(manager.SchedulerDependentManager):
address = self.db.fixed_ip_associate_pool(context.elevated(),
network['id'],
instance_id)
+ self._do_trigger_security_group_members_refresh_for_instance(
+ instance_id)
get_vif = self.db.virtual_interface_get_by_instance_and_network
vif = get_vif(context, instance_id, network['id'])
values = {'allocated': True,
@@ -573,6 +587,11 @@ class NetworkManager(manager.SchedulerDependentManager):
self.db.fixed_ip_update(context, address,
{'allocated': False,
'virtual_interface_id': None})
+ fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
+ instance_ref = fixed_ip_ref['instance']
+ instance_id = instance_ref['id']
+ self._do_trigger_security_group_members_refresh_for_instance(
+ instance_id)
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
@@ -618,61 +637,108 @@ class NetworkManager(manager.SchedulerDependentManager):
network_size, cidr_v6, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None, **kwargs):
"""Create networks based on parameters."""
+ # NOTE(jkoelker): these are dummy values to make sure iter works
+ fixed_net_v4 = netaddr.IPNetwork('0/32')
+ fixed_net_v6 = netaddr.IPNetwork('::0/128')
+ subnets_v4 = []
+ subnets_v6 = []
+
+ subnet_bits = int(math.ceil(math.log(network_size, 2)))
+
if cidr_v6:
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
- significant_bits_v6 = 64
- network_size_v6 = 1 << 64
+ prefixlen_v6 = 128 - subnet_bits
+ subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks)
if cidr:
- fixed_net = netaddr.IPNetwork(cidr)
- significant_bits = 32 - int(math.log(network_size, 2))
-
- for index in range(num_networks):
+ fixed_net_v4 = netaddr.IPNetwork(cidr)
+ prefixlen_v4 = 32 - subnet_bits
+ subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
+ count=num_networks))
+
+ # NOTE(jkoelker): This replaces the _validate_cidrs call and
+ # prevents looping multiple times
+ try:
+ nets = self.db.network_get_all(context)
+ except exception.NoNetworksFound:
+ nets = []
+ used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets]
+
+ def find_next(subnet):
+ next_subnet = subnet.next()
+ while next_subnet in subnets_v4:
+ next_subnet = next_subnet.next()
+ if next_subnet in fixed_net_v4:
+ return next_subnet
+
+ for subnet in list(subnets_v4):
+ if subnet in used_subnets:
+ next_subnet = find_next(subnet)
+ if next_subnet:
+ subnets_v4.remove(subnet)
+ subnets_v4.append(next_subnet)
+ subnet = next_subnet
+ else:
+ raise ValueError(_('cidr already in use'))
+ for used_subnet in used_subnets:
+ if subnet in used_subnet:
+ msg = _('requested cidr (%(cidr)s) conflicts with '
+ 'existing supernet (%(super)s)')
+ raise ValueError(msg % {'cidr': subnet,
+ 'super': used_subnet})
+ if used_subnet in subnet:
+ next_subnet = find_next(subnet)
+ if next_subnet:
+ subnets_v4.remove(subnet)
+ subnets_v4.append(next_subnet)
+ subnet = next_subnet
+ else:
+ msg = _('requested cidr (%(cidr)s) conflicts '
+ 'with existing smaller cidr '
+ '(%(smaller)s)')
+ raise ValueError(msg % {'cidr': subnet,
+ 'smaller': used_subnet})
+
+ networks = []
+ subnets = itertools.izip_longest(subnets_v4, subnets_v6)
+ for index, (subnet_v4, subnet_v6) in enumerate(subnets):
net = {}
net['bridge'] = bridge
net['bridge_interface'] = bridge_interface
+ net['multi_host'] = multi_host
+
net['dns1'] = dns1
net['dns2'] = dns2
- if cidr:
- start = index * network_size
- project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start],
- significant_bits))
- net['cidr'] = str(project_net)
- net['multi_host'] = multi_host
- net['netmask'] = str(project_net.netmask)
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast)
- net['dhcp_start'] = str(project_net[2])
-
if num_networks > 1:
net['label'] = '%s_%d' % (label, index)
else:
net['label'] = label
- if cidr_v6:
- start_v6 = index * network_size_v6
- cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
- significant_bits_v6)
- net['cidr_v6'] = cidr_v6
-
- project_net_v6 = netaddr.IPNetwork(cidr_v6)
+ if cidr and subnet_v4:
+ net['cidr'] = str(subnet_v4)
+ net['netmask'] = str(subnet_v4.netmask)
+ net['gateway'] = str(subnet_v4[1])
+ net['broadcast'] = str(subnet_v4.broadcast)
+ net['dhcp_start'] = str(subnet_v4[2])
+ if cidr_v6 and subnet_v6:
+ net['cidr_v6'] = str(subnet_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
net['gateway_v6'] = str(gateway_v6)
else:
- net['gateway_v6'] = str(project_net_v6[1])
+ net['gateway_v6'] = str(subnet_v6[1])
- net['netmask_v6'] = str(project_net_v6._prefixlen)
+ net['netmask_v6'] = str(subnet_v6._prefixlen)
if kwargs.get('vpn', False):
# this bit here is for vlan-manager
del net['dns1']
del net['dns2']
vlan = kwargs['vlan_start'] + index
- net['vpn_private_address'] = str(project_net[2])
- net['dhcp_start'] = str(project_net[3])
+ net['vpn_private_address'] = str(subnet_v4[2])
+ net['dhcp_start'] = str(subnet_v4[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
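# NOTE: a worked example of the subnet arithmetic above, with illustrative
# inputs cidr='10.0.0.0/16', network_size=256, num_networks=2:
#   subnet_bits  = ceil(log2(256)) = 8
#   prefixlen_v4 = 32 - 8 = 24
#   subnets_v4   = [10.0.0.0/24, 10.0.1.0/24]
# and per subnet: netmask 255.255.255.0, gateway subnet[1] = 10.0.0.1,
# dhcp_start subnet[2] = 10.0.0.2 (subnet[3] when vpn claims subnet[2]).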
@@ -685,9 +751,12 @@ class NetworkManager(manager.SchedulerDependentManager):
if not network:
raise ValueError(_('Network already exists!'))
+ else:
+ networks.append(network)
- if network and cidr:
+ if network and cidr and subnet_v4:
self._create_fixed_ips(context, network['id'])
+ return networks
@property
def _bottom_reserved_ips(self): # pylint: disable=R0201
@@ -803,14 +872,16 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager):
def _setup_network(self, context, network_ref):
"""Sets up network on this host."""
network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
- self.driver.ensure_bridge(network_ref['bridge'],
- network_ref['bridge_interface'],
- network_ref)
+
+ mac_address = self.generate_mac_address()
+ dev = self.driver.plug(network_ref, mac_address)
+ self.driver.initialize_gateway_device(dev, network_ref)
+
if not FLAGS.fake_network:
- self.driver.update_dhcp(context, network_ref)
+ self.driver.update_dhcp(context, dev, network_ref)
if(FLAGS.use_ipv6):
- self.driver.update_ra(context, network_ref)
- gateway = utils.get_my_linklocal(network_ref['bridge'])
+ self.driver.update_ra(context, dev, network_ref)
+ gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network_ref['id'],
{'gateway_v6': gateway})
@@ -857,7 +928,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
address = self.db.fixed_ip_associate_pool(context,
network['id'],
instance_id)
-
+ self._do_trigger_security_group_members_refresh_for_instance(
+ instance_id)
vif = self.db.virtual_interface_get_by_instance_and_network(context,
instance_id,
network['id'])
@@ -903,23 +975,23 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
else:
address = network_ref['vpn_public_address']
network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
- self.driver.ensure_vlan_bridge(network_ref['vlan'],
- network_ref['bridge'],
- network_ref['bridge_interface'],
- network_ref)
+
+ mac_address = self.generate_mac_address()
+ dev = self.driver.plug(network_ref, mac_address)
+ self.driver.initialize_gateway_device(dev, network_ref)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == FLAGS.vpn_ip and hasattr(self.driver,
- "ensure_vlan_forward"):
- self.driver.ensure_vlan_forward(FLAGS.vpn_ip,
+ "ensure_vpn_forward"):
+ self.driver.ensure_vpn_forward(FLAGS.vpn_ip,
network_ref['vpn_public_port'],
network_ref['vpn_private_address'])
if not FLAGS.fake_network:
- self.driver.update_dhcp(context, network_ref)
+ self.driver.update_dhcp(context, dev, network_ref)
if(FLAGS.use_ipv6):
- self.driver.update_ra(context, network_ref)
- gateway = utils.get_my_linklocal(network_ref['bridge'])
+ self.driver.update_ra(context, dev, network_ref)
+ gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network_ref['id'],
{'gateway_v6': gateway})
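# NOTE: a sketch of the per-host setup flow both managers now share
# (fake_network and ipv6 branches behave as shown in the hunks above):
#
#   mac_address = self.generate_mac_address()
#   dev = self.driver.plug(network_ref, mac_address)
#   self.driver.initialize_gateway_device(dev, network_ref)
#   self.driver.update_dhcp(context, dev, network_ref)
#   if FLAGS.use_ipv6:
#       self.driver.update_ra(context, dev, network_ref)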
diff --git a/nova/rpc/amqp.py b/nova/rpc/amqp.py
index 61555795a..fe429b266 100644
--- a/nova/rpc/amqp.py
+++ b/nova/rpc/amqp.py
@@ -257,7 +257,7 @@ class TopicAdapterConsumer(AdapterConsumer):
self.queue = topic
self.routing_key = topic
self.exchange = FLAGS.control_exchange
- self.durable = False
+ self.durable = FLAGS.rabbit_durable_queues
super(TopicAdapterConsumer, self).__init__(connection=connection,
topic=topic, proxy=proxy)
@@ -345,7 +345,7 @@ class TopicPublisher(Publisher):
def __init__(self, connection=None, topic='broadcast'):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
- self.durable = False
+ self.durable = FLAGS.rabbit_durable_queues
super(TopicPublisher, self).__init__(connection=connection)
@@ -373,6 +373,7 @@ class DirectConsumer(Consumer):
self.queue = msg_id
self.routing_key = msg_id
self.exchange = msg_id
+ self.durable = False
self.auto_delete = True
self.exclusive = True
super(DirectConsumer, self).__init__(connection=connection)
@@ -386,6 +387,7 @@ class DirectPublisher(Publisher):
def __init__(self, connection=None, msg_id=None):
self.routing_key = msg_id
self.exchange = msg_id
+ self.durable = False
self.auto_delete = True
super(DirectPublisher, self).__init__(connection=connection)
@@ -573,7 +575,7 @@ def send_message(topic, message, wait=True):
publisher = messaging.Publisher(connection=Connection.instance(),
exchange=FLAGS.control_exchange,
- durable=False,
+ durable=FLAGS.rabbit_durable_queues,
exchange_type='topic',
routing_key=topic)
publisher.send(message)
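# NOTE: topic exchanges/queues now honor FLAGS.rabbit_durable_queues so they
# can survive a broker restart, while per-request direct reply queues stay
# non-durable and auto_delete. A hypothetical flag definition consistent
# with this usage (the actual definition lives elsewhere in nova):
#
#   flags.DEFINE_bool('rabbit_durable_queues', False,
#                     'use durable queues in rabbit')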
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/abstract_scheduler.py
index d1924c9f9..eb924732a 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/abstract_scheduler.py
@@ -14,7 +14,7 @@
# under the License.
"""
-The Zone Aware Scheduler is a base class Scheduler for creating instances
+The AbstractScheduler is a base Scheduler class for creating instances
across zones. There are two expansion points to this class for:
1. Assigning Weights to hosts for requested instances
2. Filtering Hosts based on required instance capabilities
@@ -40,7 +40,7 @@ from nova.scheduler import api
from nova.scheduler import driver
FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')
+LOG = logging.getLogger('nova.scheduler.abstract_scheduler')
class InvalidBlob(exception.NovaException):
@@ -48,8 +48,10 @@ class InvalidBlob(exception.NovaException):
"to instance create request.")
-class ZoneAwareScheduler(driver.Scheduler):
- """Base class for creating Zone Aware Schedulers."""
+class AbstractScheduler(driver.Scheduler):
+ """Base class for creating Schedulers that can work across any nova
+ deployment, from simple designs to multiply-nested zones.
+ """
def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
@@ -266,7 +268,7 @@ class ZoneAwareScheduler(driver.Scheduler):
"""
if topic != "compute":
- raise NotImplementedError(_("Zone Aware Scheduler only understands"
+ raise NotImplementedError(_("Scheduler only understands"
" Compute nodes (for now)"))
num_instances = request_spec.get('num_instances', 1)
@@ -328,13 +330,31 @@ class ZoneAwareScheduler(driver.Scheduler):
requested_mem = instance_type['memory_mb'] * 1024 * 1024
return capabilities['host_memory_free'] >= requested_mem
+ def hold_filter_hosts(self, topic, request_spec, hosts=None):
+ """Filter the full host list (from the ZoneManager)"""
+ # NOTE(dabo): The logic used by the current _schedule() method
+ # is incorrect. Since this task is just to refactor the classes,
+ # I'm not fixing the logic now - that will be the next task.
+ # So for now this method is just renamed; afterwards this will
+ # become the filter_hosts() method, and the one below will
+ # be removed.
+ filter_name = request_spec.get('filter', None)
+ # Make sure that the requested filter is legitimate.
+ selected_filter = host_filter.choose_host_filter(filter_name)
+
+ # TODO(sandy): We're only using InstanceType-based specs
+ # currently. Later we'll need to snoop for more detailed
+ # host filter requests.
+ instance_type = request_spec['instance_type']
+ name, query = selected_filter.instance_type_to_filter(instance_type)
+ return selected_filter.filter_hosts(self.zone_manager, query)
+
def filter_hosts(self, topic, request_spec, host_list=None):
"""Return a list of hosts which are acceptable for scheduling.
Return value should be a list of (hostname, capability_dict)s.
Derived classes may override this, but may find the
'<topic>_filter' function more appropriate.
"""
-
def _default_filter(self, hostname, capabilities, request_spec):
"""Default filter function if there's no <topic>_filter"""
# NOTE(sirp): The default logic is the equivalent to
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 1bfa7740a..f28353f05 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -30,6 +30,7 @@ from nova import log as logging
from nova import rpc
from nova import utils
from nova.compute import power_state
+from nova.api.ec2 import ec2utils
FLAGS = flags.FLAGS
@@ -78,7 +79,8 @@ class Scheduler(object):
"""Must override at least this method for scheduler to work."""
raise NotImplementedError(_("Must implement a fallback schedule"))
- def schedule_live_migration(self, context, instance_id, dest):
+ def schedule_live_migration(self, context, instance_id, dest,
+ block_migration=False):
"""Live migration scheduling method.
:param context:
@@ -87,9 +89,7 @@ class Scheduler(object):
:return:
The host where instance is running currently.
Then scheduler send request that host.
-
"""
-
# Whether instance exists and is running.
instance_ref = db.instance_get(context, instance_id)
@@ -97,10 +97,11 @@ class Scheduler(object):
self._live_migration_src_check(context, instance_ref)
# Checking destination host.
- self._live_migration_dest_check(context, instance_ref, dest)
-
+ self._live_migration_dest_check(context, instance_ref,
+ dest, block_migration)
# Common checking.
- self._live_migration_common_check(context, instance_ref, dest)
+ self._live_migration_common_check(context, instance_ref,
+ dest, block_migration)
# Changing instance_state.
db.instance_set_state(context,
@@ -130,7 +131,8 @@ class Scheduler(object):
# Checking instance is running.
if (power_state.RUNNING != instance_ref['state'] or \
'running' != instance_ref['state_description']):
- raise exception.InstanceNotRunning(instance_id=instance_ref['id'])
+ instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
+ raise exception.InstanceNotRunning(instance_id=instance_id)
# Checking volume node is running when any volumes are mounted
# to the instance.
@@ -147,7 +149,8 @@ class Scheduler(object):
if not self.service_is_up(services[0]):
raise exception.ComputeServiceUnavailable(host=src)
- def _live_migration_dest_check(self, context, instance_ref, dest):
+ def _live_migration_dest_check(self, context, instance_ref, dest,
+ block_migration):
"""Live migration check routine (for destination host).
:param context: security context
@@ -168,16 +171,18 @@ class Scheduler(object):
# and dest is not same.
src = instance_ref['host']
if dest == src:
- raise exception.UnableToMigrateToSelf(
- instance_id=instance_ref['id'],
- host=dest)
+ instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
+ raise exception.UnableToMigrateToSelf(instance_id=instance_id,
+ host=dest)
# Checking dst host still has enough capacities.
self.assert_compute_node_has_enough_resources(context,
instance_ref,
- dest)
+ dest,
+ block_migration)
- def _live_migration_common_check(self, context, instance_ref, dest):
+ def _live_migration_common_check(self, context, instance_ref, dest,
+ block_migration):
"""Live migration common check routine.
The checks below are followed by
@@ -186,11 +191,26 @@ class Scheduler(object):
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
+ :param block_migration: if True, check for block migration.
"""
# Checking shared storage connectivity
- self.mounted_on_same_shared_storage(context, instance_ref, dest)
+ # if block migration, instances_path should not be on shared storage.
+ try:
+ self.mounted_on_same_shared_storage(context, instance_ref, dest)
+ if block_migration:
+ reason = _("Block migration can not be used "
+ "with shared storage.")
+ raise exception.InvalidSharedStorage(reason=reason, path=dest)
+ except exception.FileNotFound:
+ if not block_migration:
+ src = instance_ref['host']
+ ipath = FLAGS.instances_path
+ logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
+ "same shared storage between %(src)s "
+ "and %(dest)s.") % locals())
+ raise
# Checking dest exists.
dservice_refs = db.service_get_all_compute_by_host(context, dest)
@@ -229,14 +249,26 @@ class Scheduler(object):
"original host %(src)s.") % locals())
raise
- def assert_compute_node_has_enough_resources(self, context,
- instance_ref, dest):
+ def assert_compute_node_has_enough_resources(self, context, instance_ref,
+ dest, block_migration):
+
"""Checks if destination host has enough resource for live migration.
- Currently, only memory checking has been done.
- If storage migration(block migration, meaning live-migration
- without any shared storage) will be available, local storage
- checking is also necessary.
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+ :param block_migration: if True, disk checking is also performed
+
+ """
+ self.assert_compute_node_has_enough_memory(context, instance_ref, dest)
+ if not block_migration:
+ return
+ self.assert_compute_node_has_enough_disk(context, instance_ref, dest)
+
+ def assert_compute_node_has_enough_memory(self, context,
+ instance_ref, dest):
+ """Checks if destination host has enough memory for live migration.
+
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
@@ -244,23 +276,70 @@ class Scheduler(object):
"""
- # Getting instance information
- hostname = instance_ref['hostname']
+ # Getting total available memory of host
+ avail = self._get_compute_info(context, dest, 'memory_mb')
- # Getting host information
- service_refs = db.service_get_all_compute_by_host(context, dest)
- compute_node_ref = service_refs[0]['compute_node'][0]
+ # Getting total used memory of host
+ # It should be the sum of memory sizes assigned to instances,
+ # because overcommitting is risky.
+ used = 0
+ instance_refs = db.instance_get_all_by_host(context, dest)
+ used_list = [i['memory_mb'] for i in instance_refs]
+ if used_list:
+ used = reduce(lambda x, y: x + y, used_list)
- mem_total = int(compute_node_ref['memory_mb'])
- mem_used = int(compute_node_ref['memory_mb_used'])
- mem_avail = mem_total - mem_used
mem_inst = instance_ref['memory_mb']
- if mem_avail <= mem_inst:
- reason = _("Unable to migrate %(hostname)s to destination: "
- "%(dest)s (host:%(mem_avail)s <= instance:"
- "%(mem_inst)s)")
+ avail = avail - used
+ if avail <= mem_inst:
+ instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
+ reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
+ "Lack of disk(host:%(avail)s <= instance:%(mem_inst)s)")
+ raise exception.MigrationError(reason=reason % locals())
+
+ def assert_compute_node_has_enough_disk(self, context,
+ instance_ref, dest):
+ """Checks if destination host has enough disk for block migration.
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ # Getting total available disk of host
+ avail = self._get_compute_info(context, dest, 'local_gb')
+
+ # Getting total used disk of host
+ # It should be the sum of local disk sizes assigned to instances,
+ # because overcommitting is risky.
+ used = 0
+ instance_refs = db.instance_get_all_by_host(context, dest)
+ used_list = [i['local_gb'] for i in instance_refs]
+ if used_list:
+ used = reduce(lambda x, y: x + y, used_list)
+
+ disk_inst = instance_ref['local_gb']
+ avail = avail - used
+ if avail <= disk_inst:
+ instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
+ reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
+ "Lack of disk(host:%(avail)s "
+ "<= instance:%(disk_inst)s)")
raise exception.MigrationError(reason=reason % locals())
+ def _get_compute_info(self, context, host, key):
+ """get compute node's infomation specified by key
+
+ :param context: security context
+ :param host: hostname(must be compute node)
+ :param key: column name of compute_nodes
+ :return: value specified by key
+
+ """
+ compute_node_ref = db.service_get_all_compute_by_host(context, host)
+ compute_node_ref = compute_node_ref[0]['compute_node'][0]
+ return compute_node_ref[key]
+
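+ # NOTE: a worked example of the memory check above (illustrative numbers):
+ # dest reports memory_mb=16384 while its instances sum to used=10240, so
+ # avail = 6144; migrating an instance with memory_mb=4096 passes
+ # (6144 > 4096), while memory_mb=8192 raises MigrationError. The disk
+ # check in assert_compute_node_has_enough_disk follows the same pattern
+ # with 'local_gb'.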
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
"""Check if the src and dest host mount same shared storage.
@@ -283,15 +362,13 @@ class Scheduler(object):
{"method": 'create_shared_storage_test_file'})
# make sure existence at src host.
- rpc.call(context, src_t,
- {"method": 'check_shared_storage_test_file',
- "args": {'filename': filename}})
+ ret = rpc.call(context, src_t,
+ {"method": 'check_shared_storage_test_file',
+ "args": {'filename': filename}})
+ if not ret:
+ raise exception.FileNotFound(file_path=filename)
- except rpc.RemoteError:
- ipath = FLAGS.instances_path
- logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
- "same shared storage between %(src)s "
- "and %(dest)s.") % locals())
+ except exception.FileNotFound:
raise
finally:
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index b7bbbbcb8..45a8f40d8 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -14,7 +14,12 @@
# under the License.
"""
-Host Filter is a mechanism for requesting instance resources.
+The Host Filter classes are a way to ensure that only hosts that are
+appropriate are considered when creating a new instance. Hosts that are
+either incompatible or insufficient to accept a newly-requested instance
+are removed by Host Filter classes from consideration. Those that pass
+the filter are then passed on for weighting or other process for ordering.
+
Three filters are included: AllHosts, Flavor & JSON. AllHosts just
returns the full, unfiltered list of hosts. Flavor is a hard coded
matching mechanism based on flavor criteria and JSON is an ad-hoc
@@ -28,12 +33,6 @@ noted a need for a more expressive way of specifying instances.
Since we don't want to get into building full DSL this is a simple
form as an example of how this could be done. In reality, most
consumers will use the more rigid filters such as FlavorFilter.
-
-Note: These are "required" capability filters. These capabilities
-used must be present or the host will be excluded. The hosts
-returned are then weighed by the Weighted Scheduler. Weights
-can take the more esoteric factors into consideration (such as
-server affinity and customer separation).
"""
import json
@@ -41,9 +40,7 @@ import json
from nova import exception
from nova import flags
from nova import log as logging
-from nova.scheduler import zone_aware_scheduler
from nova import utils
-from nova.scheduler import zone_aware_scheduler
LOG = logging.getLogger('nova.scheduler.host_filter')
@@ -125,9 +122,8 @@ class InstanceTypeFilter(HostFilter):
spec_disk = instance_type['local_gb']
extra_specs = instance_type['extra_specs']
- if host_ram_mb >= spec_ram and \
- disk_bytes >= spec_disk and \
- self._satisfies_extra_specs(capabilities, instance_type):
+ if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
+ self._satisfies_extra_specs(capabilities, instance_type)):
selected_hosts.append((host, capabilities))
return selected_hosts
@@ -309,7 +305,6 @@ def choose_host_filter(filter_name=None):
function checks the filter name against a predefined set
of acceptable filters.
"""
-
if not filter_name:
filter_name = FLAGS.default_host_filter
for filter_class in FILTERS:
@@ -317,33 +312,3 @@ def choose_host_filter(filter_name=None):
if host_match == filter_name:
return filter_class()
raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
-
-
-class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- """The HostFilterScheduler uses the HostFilter to filter
- hosts for weighing. The particular filter used may be passed in
- as an argument or the default will be used.
-
- request_spec = {'filter': <Filter name>,
- 'instance_type': <InstanceType dict>}
- """
-
- def filter_hosts(self, topic, request_spec, hosts=None):
- """Filter the full host list (from the ZoneManager)"""
-
- filter_name = request_spec.get('filter', None)
- host_filter = choose_host_filter(filter_name)
-
- # TODO(sandy): We're only using InstanceType-based specs
- # currently. Later we'll need to snoop for more detailed
- # host filter requests.
- instance_type = request_spec['instance_type']
- name, query = host_filter.instance_type_to_filter(instance_type)
- return host_filter.filter_hosts(self.zone_manager, query)
-
- def weigh_hosts(self, topic, request_spec, hosts):
- """Derived classes must override this method and return
- a lists of hosts in [{weight, hostname}] format.
- """
- return [dict(weight=1, hostname=hostname, capabilities=caps)
- for hostname, caps in hosts]
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 329107efe..a58b11289 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -22,11 +22,14 @@ The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
"""
+# TODO(dabo): This class will be removed in the next merge prop; it remains now
+# because much of the code will be refactored into different classes.
+
import collections
from nova import flags
from nova import log as logging
-from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import abstract_scheduler
from nova import utils
from nova import exception
@@ -61,7 +64,7 @@ def compute_fill_first_cost_fn(host):
return free_mem
-class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+class LeastCostScheduler(abstract_scheduler.AbstractScheduler):
def __init__(self, *args, **kwargs):
self.cost_fns_cache = {}
super(LeastCostScheduler, self).__init__(*args, **kwargs)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index c8b16b622..0e395ee79 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -34,12 +34,13 @@ from nova.scheduler import zone_manager
LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
flags.DEFINE_string('scheduler_driver',
- 'nova.scheduler.chance.ChanceScheduler',
- 'Driver to use for the scheduler')
+ 'nova.scheduler.multi.MultiScheduler',
+ 'Default driver to use for the scheduler')
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
+
def __init__(self, scheduler_driver=None, *args, **kwargs):
self.zone_manager = zone_manager.ZoneManager()
if not scheduler_driver:
@@ -71,8 +72,8 @@ class SchedulerManager(manager.Manager):
def update_service_capabilities(self, context=None, service_name=None,
host=None, capabilities=None):
"""Process a capability update from a service node."""
- if not capability:
- capability = {}
+ if not capabilities:
+ capabilities = {}
self.zone_manager.update_service_capabilities(service_name,
host, capabilities)
@@ -113,7 +114,7 @@ class SchedulerManager(manager.Manager):
# NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
# Based on bexar design summit discussion,
# just put this here for bexar release.
- def show_host_resources(self, context, host, *args):
+ def show_host_resources(self, context, host):
"""Shows the physical/usage resource given by hosts.
:param context: security context
@@ -121,43 +122,45 @@ class SchedulerManager(manager.Manager):
:returns:
example format is below.
{'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
- D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048}
+ D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
+ 'vcpus_used': 12, 'memory_mb_used': 10240,
+ 'local_gb_used': 64}
"""
+ # Getting compute node info and related instances info
compute_ref = db.service_get_all_compute_by_host(context, host)
compute_ref = compute_ref[0]
-
- # Getting physical resource information
- compute_node_ref = compute_ref['compute_node'][0]
- resource = {'vcpus': compute_node_ref['vcpus'],
- 'memory_mb': compute_node_ref['memory_mb'],
- 'local_gb': compute_node_ref['local_gb'],
- 'vcpus_used': compute_node_ref['vcpus_used'],
- 'memory_mb_used': compute_node_ref['memory_mb_used'],
- 'local_gb_used': compute_node_ref['local_gb_used']}
-
- # Getting usage resource information
- usage = {}
instance_refs = db.instance_get_all_by_host(context,
compute_ref['host'])
+
+ # Getting total available/used resource
+ compute_ref = compute_ref['compute_node'][0]
+ resource = {'vcpus': compute_ref['vcpus'],
+ 'memory_mb': compute_ref['memory_mb'],
+ 'local_gb': compute_ref['local_gb'],
+ 'vcpus_used': compute_ref['vcpus_used'],
+ 'memory_mb_used': compute_ref['memory_mb_used'],
+ 'local_gb_used': compute_ref['local_gb_used']}
+ usage = dict()
if not instance_refs:
return {'resource': resource, 'usage': usage}
+ # Getting usage resource per project
project_ids = [i['project_id'] for i in instance_refs]
project_ids = list(set(project_ids))
for project_id in project_ids:
- vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
- host,
- project_id)
- mem = db.instance_get_memory_sum_by_host_and_project(context,
- host,
- project_id)
- hdd = db.instance_get_disk_sum_by_host_and_project(context,
- host,
- project_id)
- usage[project_id] = {'vcpus': int(vcpus),
- 'memory_mb': int(mem),
- 'local_gb': int(hdd)}
+ vcpus = [i['vcpus'] for i in instance_refs \
+ if i['project_id'] == project_id]
+
+ mem = [i['memory_mb'] for i in instance_refs \
+ if i['project_id'] == project_id]
+
+ disk = [i['local_gb'] for i in instance_refs \
+ if i['project_id'] == project_id]
+
+ usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus),
+ 'memory_mb': reduce(lambda x, y: x + y, mem),
+ 'local_gb': reduce(lambda x, y: x + y, disk)}
return {'resource': resource, 'usage': usage}
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
new file mode 100644
index 000000000..b1578033c
--- /dev/null
+++ b/nova/scheduler/multi.py
@@ -0,0 +1,73 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Scheduler that allows routing some calls to one driver and others to another.
+"""
+
+from nova import flags
+from nova import utils
+from nova.scheduler import driver
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('compute_scheduler_driver',
+ 'nova.scheduler.chance.ChanceScheduler',
+ 'Driver to use for scheduling compute calls')
+flags.DEFINE_string('volume_scheduler_driver',
+ 'nova.scheduler.chance.ChanceScheduler',
+ 'Driver to use for scheduling volume calls')
+
+
+# A mapping of methods to topics so we can figure out which driver to use.
+_METHOD_MAP = {'run_instance': 'compute',
+ 'start_instance': 'compute',
+ 'create_volume': 'volume'}
+
+
+class MultiScheduler(driver.Scheduler):
+ """A scheduler that holds multiple sub-schedulers.
+
+ This exists to allow flag-driven composability of schedulers, allowing
+ third parties to integrate custom schedulers more easily.
+
+ """
+
+ def __init__(self):
+ super(MultiScheduler, self).__init__()
+ compute_driver = utils.import_object(FLAGS.compute_scheduler_driver)
+ volume_driver = utils.import_object(FLAGS.volume_scheduler_driver)
+
+ self.drivers = {'compute': compute_driver,
+ 'volume': volume_driver}
+
+ def __getattr__(self, key):
+ if not key.startswith('schedule_'):
+ raise AttributeError(key)
+ method = key[len('schedule_'):]
+ if method not in _METHOD_MAP:
+ raise AttributeError(key)
+ return getattr(self.drivers[_METHOD_MAP[method]], key)
+
+ def set_zone_manager(self, zone_manager):
+ for k, v in self.drivers.iteritems():
+ v.set_zone_manager(zone_manager)
+
+ def schedule(self, context, topic, *_args, **_kwargs):
+ return self.drivers[topic].schedule(context, topic, *_args, **_kwargs)
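+# NOTE: a sketch of how dispatch resolves, with illustrative flag values:
+#
+#   --scheduler_driver=nova.scheduler.multi.MultiScheduler
+#   --compute_scheduler_driver=nova.scheduler.least_cost.LeastCostScheduler
+#   --volume_scheduler_driver=nova.scheduler.chance.ChanceScheduler
+#
+#   scheduler.schedule_run_instance(...)    # via __getattr__ -> compute
+#   scheduler.schedule_create_volume(...)   # via __getattr__ -> volume
+#   scheduler.schedule(ctxt, 'volume', ...) # explicit topic -> volume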
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
index ab7ae2e54..704d06582 100644
--- a/nova/tests/api/openstack/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -116,14 +116,14 @@ class FloatingIpTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
- response = {'floating_ips': [{'floating_ip': {'instance_id': 11,
- 'ip': '10.10.10.10',
- 'fixed_ip': '10.0.0.1',
- 'id': 1}},
- {'floating_ip': {'instance_id': None,
- 'ip': '10.10.10.11',
- 'fixed_ip': None,
- 'id': 2}}]}
+ response = {'floating_ips': [{'instance_id': 11,
+ 'ip': '10.10.10.10',
+ 'fixed_ip': '10.0.0.1',
+ 'id': 1},
+ {'instance_id': None,
+ 'ip': '10.10.10.11',
+ 'fixed_ip': None,
+ 'id': 2}]}
self.assertEqual(res_dict, response)
def test_floating_ip_show(self):
@@ -177,8 +177,10 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(actual, expected)
def test_floating_ip_disassociate(self):
+ body = dict()
req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate')
req.method = 'POST'
+ req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
diff --git a/nova/tests/api/openstack/contrib/test_keypairs.py b/nova/tests/api/openstack/contrib/test_keypairs.py
index c9dc34d65..eb3bc7af0 100644
--- a/nova/tests/api/openstack/contrib/test_keypairs.py
+++ b/nova/tests/api/openstack/contrib/test_keypairs.py
@@ -28,6 +28,7 @@ def fake_keypair(name):
'fingerprint': 'FAKE_FINGERPRINT',
'name': name}
+
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
@@ -77,8 +78,21 @@ class KeypairsTest(test.TestCase):
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def test_keypair_import(self):
- body = {'keypair': {'name': 'create_test',
- 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznAx9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6YQj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymiMZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7ljj5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGcj7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwBbHkXa6OciiJDvkRzJXzf'}}
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ },
+ }
+
req = webob.Request.blank('/v1.1/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
@@ -96,4 +110,3 @@ class KeypairsTest(test.TestCase):
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
-
diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py
new file mode 100644
index 000000000..f6a25385f
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_quotas.py
@@ -0,0 +1,152 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import webob
+
+from nova import context
+from nova import test
+from nova.tests.api.openstack import fakes
+
+from nova.api.openstack.contrib.quotas import QuotaSetsController
+
+
+def quota_set(id):
+ return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10,
+ 'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10,
+ 'instances': 10, 'injected_files': 5, 'cores': 20,
+ 'injected_file_content_bytes': 10240}}
+
+
+def quota_set_list():
+ return {'quota_set_list': [quota_set('1234'), quota_set('5678'),
+ quota_set('update_me')]}
+
+
+class QuotaSetsTest(test.TestCase):
+
+ def setUp(self):
+ super(QuotaSetsTest, self).setUp()
+ self.controller = QuotaSetsController()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.user_context = context.RequestContext(self.user_id,
+ self.project_id)
+ self.admin_context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+
+ def test_format_quota_set(self):
+ raw_quota_set = {
+ 'instances': 10,
+ 'cores': 20,
+ 'ram': 51200,
+ 'volumes': 10,
+ 'floating_ips': 10,
+ 'metadata_items': 128,
+ 'gigabytes': 1000,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}
+
+ quota_set = QuotaSetsController()._format_quota_set('1234',
+ raw_quota_set)
+ qs = quota_set['quota_set']
+
+ self.assertEqual(qs['id'], '1234')
+ self.assertEqual(qs['instances'], 10)
+ self.assertEqual(qs['cores'], 20)
+ self.assertEqual(qs['ram'], 51200)
+ self.assertEqual(qs['volumes'], 10)
+ self.assertEqual(qs['gigabytes'], 1000)
+ self.assertEqual(qs['floating_ips'], 10)
+ self.assertEqual(qs['metadata_items'], 128)
+ self.assertEqual(qs['injected_files'], 5)
+ self.assertEqual(qs['injected_file_content_bytes'], 10240)
+
+ def test_quotas_defaults(self):
+ req = webob.Request.blank('/v1.1/os-quota-sets/fake_tenant/defaults')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+ expected = {'quota_set': {
+ 'id': 'fake_tenant',
+ 'instances': 10,
+ 'cores': 20,
+ 'ram': 51200,
+ 'volumes': 10,
+ 'gigabytes': 1000,
+ 'floating_ips': 10,
+ 'metadata_items': 128,
+ 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}}
+
+ self.assertEqual(json.loads(res.body), expected)
+
+ def test_quotas_show_as_admin(self):
+ req = webob.Request.blank('/v1.1/os-quota-sets/1234')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(json.loads(res.body), quota_set('1234'))
+
+ def test_quotas_show_as_unauthorized_user(self):
+ req = webob.Request.blank('/v1.1/os-quota-sets/1234')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_context))
+
+ self.assertEqual(res.status_int, 403)
+
+ def test_quotas_update_as_admin(self):
+ updated_quota_set = {'quota_set': {'instances': 50,
+ 'cores': 50, 'ram': 51200, 'volumes': 10,
+ 'gigabytes': 1000, 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}}
+
+ req = webob.Request.blank('/v1.1/os-quota-sets/update_me')
+ req.method = 'PUT'
+ req.body = json.dumps(updated_quota_set)
+ req.headers['Content-Type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.admin_context))
+
+ self.assertEqual(json.loads(res.body), updated_quota_set)
+
+ def test_quotas_update_as_user(self):
+ updated_quota_set = {'quota_set': {'instances': 50,
+ 'cores': 50, 'ram': 51200, 'volumes': 10,
+ 'gigabytes': 1000, 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240}}
+
+ req = webob.Request.blank('/v1.1/os-quota-sets/update_me')
+ req.method = 'PUT'
+ req.body = json.dumps(updated_quota_set)
+ req.headers['Content-Type'] = 'application/json'
+
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_context))
+
+ self.assertEqual(res.status_int, 403)
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 5a6e43579..b422bc4d1 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -249,6 +249,10 @@ class MiscFunctionsTest(test.TestCase):
common.get_id_from_href,
fixture)
+ def test_get_id_from_href_int(self):
+ fixture = 1
+ self.assertEqual(fixture, common.get_id_from_href(fixture))
+
def test_get_version_from_href(self):
fixture = 'http://www.testsite.com/v1.1/images'
expected = '1.1'
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index ea8fe68a7..5d3208e10 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -84,6 +84,18 @@ class ExtensionControllerTest(test.TestCase):
super(ExtensionControllerTest, self).setUp()
ext_path = os.path.join(os.path.dirname(__file__), "extensions")
self.flags(osapi_extensions_path=ext_path)
+ self.ext_list = [
+ "FlavorExtraSpecs",
+ "Floating_ips",
+ "Fox In Socks",
+ "Hosts",
+ "Keypairs",
+ "Multinic",
+ "Quotas",
+ "SecurityGroups",
+ "Volumes",
+ ]
+ self.ext_list.sort()
def test_list_extensions_json(self):
app = openstack.APIRouterV11()
@@ -96,12 +108,10 @@ class ExtensionControllerTest(test.TestCase):
data = json.loads(response.body)
names = [x['name'] for x in data['extensions']]
names.sort()
- self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips",
- "Fox In Socks", "Hosts", "Keypairs", "Multinic", "SecurityGroups",
- "Volumes"])
+ self.assertEqual(names, self.ext_list)
# Make sure that at least Fox in Sox is correct.
- (fox_ext,) = [
+ (fox_ext, ) = [
x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
self.assertEqual(fox_ext, {
'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
@@ -127,9 +137,7 @@ class ExtensionControllerTest(test.TestCase):
"updated": "2011-01-22T13:25:27-06:00",
"description": "The Fox In Socks Extension",
"alias": "FOXNSOX",
- "links": [],
- },
- )
+ "links": []})
def test_list_extensions_xml(self):
app = openstack.APIRouterV11()
@@ -145,10 +153,10 @@ class ExtensionControllerTest(test.TestCase):
# Make sure we have all the extensions.
exts = root.findall('{0}extension'.format(NS))
- self.assertEqual(len(exts), 8)
+ self.assertEqual(len(exts), len(self.ext_list))
# Make sure that at least Fox in Sox is correct.
- (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX']
+ (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
self.assertEqual(fox_ext.get('namespace'),
'http://www.fox.in.socks/api/ext/pie/v1.0')
@@ -220,6 +228,7 @@ class ResourceExtensionTest(test.TestCase):
class InvalidExtension(object):
+
def get_alias(self):
return "THIRD"
@@ -281,7 +290,7 @@ class ActionExtensionTest(test.TestCase):
def test_invalid_action_body(self):
body = dict(blah=dict(name="test")) # Doesn't exist
response = self._send_server_action_request("/servers/1/action", body)
- self.assertEqual(501, response.status_int)
+ self.assertEqual(400, response.status_int)
def test_invalid_action(self):
body = dict(blah=dict(name="test"))
@@ -336,27 +345,18 @@ class ExtensionsXMLSerializerTest(test.TestCase):
    def test_serialize_extension(self):
serializer = extensions.ExtensionsXMLSerializer()
- data = {
- 'extension': {
- 'name': 'ext1',
- 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
- 'alias': 'RS-PIE',
- 'updated': '2011-01-22T13:25:27-06:00',
- 'description': 'Adds the capability to share an image.',
- 'links': [
- {
- 'rel': 'describedby',
- 'type': 'application/pdf',
- 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf',
- },
- {
- 'rel': 'describedby',
- 'type': 'application/vnd.sun.wadl+xml',
- 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl',
- },
- ],
- },
- }
+ data = {'extension': {
+ 'name': 'ext1',
+ 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
+ 'alias': 'RS-PIE',
+ 'updated': '2011-01-22T13:25:27-06:00',
+ 'description': 'Adds the capability to share an image.',
+ 'links': [{'rel': 'describedby',
+ 'type': 'application/pdf',
+ 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'},
+ {'rel': 'describedby',
+ 'type': 'application/vnd.sun.wadl+xml',
+ 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}}
xml = serializer.serialize(data, 'show')
print xml
@@ -378,48 +378,30 @@ class ExtensionsXMLSerializerTest(test.TestCase):
def test_serialize_extensions(self):
serializer = extensions.ExtensionsXMLSerializer()
- data = {
- "extensions": [
- {
- "name": "Public Image Extension",
- "namespace": "http://foo.com/api/ext/pie/v1.0",
- "alias": "RS-PIE",
- "updated": "2011-01-22T13:25:27-06:00",
- "description": "Adds the capability to share an image.",
- "links": [
- {
- "rel": "describedby",
+ data = {"extensions": [{
+ "name": "Public Image Extension",
+ "namespace": "http://foo.com/api/ext/pie/v1.0",
+ "alias": "RS-PIE",
+ "updated": "2011-01-22T13:25:27-06:00",
+ "description": "Adds the capability to share an image.",
+ "links": [{"rel": "describedby",
"type": "application/pdf",
- "href": "http://foo.com/api/ext/cs-pie.pdf",
- },
- {
- "rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": "http://foo.com/api/ext/cs-pie.wadl",
- },
- ],
- },
- {
- "name": "Cloud Block Storage",
- "namespace": "http://foo.com/api/ext/cbs/v1.0",
- "alias": "RS-CBS",
- "updated": "2011-01-12T11:22:33-06:00",
- "description": "Allows mounting cloud block storage.",
- "links": [
- {
- "rel": "describedby",
- "type": "application/pdf",
- "href": "http://foo.com/api/ext/cs-cbs.pdf",
- },
- {
- "rel": "describedby",
+ "href": "http://foo.com/api/ext/cs-pie.pdf"},
+ {"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": "http://foo.com/api/ext/cs-cbs.wadl",
- },
- ],
- },
- ],
- }
+ "href": "http://foo.com/api/ext/cs-pie.wadl"}]},
+ {"name": "Cloud Block Storage",
+ "namespace": "http://foo.com/api/ext/cbs/v1.0",
+ "alias": "RS-CBS",
+ "updated": "2011-01-12T11:22:33-06:00",
+ "description": "Allows mounting cloud block storage.",
+ "links": [{"rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://foo.com/api/ext/cs-cbs.pdf"},
+ {"rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]}
xml = serializer.serialize(data, 'index')
print xml
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index 1dc3c3a17..801b06230 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -915,86 +915,56 @@ class LimitsViewBuilderV11Test(test.TestCase):
def setUp(self):
self.view_builder = views.limits.ViewBuilderV11()
- self.rate_limits = [
- {
- "URI": "*",
- "regex": ".*",
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "resetTime": 1311272226,
- },
- {
- "URI": "*/servers",
- "regex": "^/servers",
- "value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "resetTime": 1311272226,
- },
- ]
- self.absolute_limits = {
- "metadata_items": 1,
- "injected_files": 5,
- "injected_file_content_bytes": 5,
- }
+ self.rate_limits = [{"URI": "*",
+ "regex": ".*",
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "resetTime": 1311272226},
+ {"URI": "*/servers",
+ "regex": "^/servers",
+ "value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "resetTime": 1311272226}]
+ self.absolute_limits = {"metadata_items": 1,
+ "injected_files": 5,
+ "injected_file_content_bytes": 5}
def tearDown(self):
pass
def test_build_limits(self):
- expected_limits = {
- "limits": {
- "rate": [
- {
- "uri": "*",
- "regex": ".*",
- "limit": [
- {
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-07-21T18:17:06Z",
- },
- ]
- },
- {
- "uri": "*/servers",
- "regex": "^/servers",
- "limit": [
- {
- "value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "next-available": "2011-07-21T18:17:06Z",
- },
- ]
- },
- ],
- "absolute": {
- "maxServerMeta": 1,
- "maxImageMeta": 1,
- "maxPersonality": 5,
- "maxPersonalitySize": 5
- }
- }
- }
+ expected_limits = {"limits": {
+ "rate": [{
+ "uri": "*",
+ "regex": ".*",
+ "limit": [{"value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-07-21T18:17:06Z"}]},
+ {"uri": "*/servers",
+ "regex": "^/servers",
+ "limit": [{"value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": "2011-07-21T18:17:06Z"}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 5}}}
output = self.view_builder.build(self.rate_limits,
self.absolute_limits)
self.assertDictMatch(output, expected_limits)
def test_build_limits_empty_limits(self):
- expected_limits = {
- "limits": {
- "rate": [],
- "absolute": {},
- }
- }
+ expected_limits = {"limits": {"rate": [],
+ "absolute": {}}}
abs_limits = {}
rate_limits = []
@@ -1012,45 +982,28 @@ class LimitsXMLSerializationTest(test.TestCase):
def test_index(self):
serializer = limits.LimitsXMLSerializer()
-
- fixture = {
- "limits": {
- "rate": [
- {
- "uri": "*",
- "regex": ".*",
- "limit": [
- {
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-12-15T22:42:45Z",
- },
- ]
- },
- {
- "uri": "*/servers",
- "regex": "^/servers",
- "limit": [
- {
- "value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "next-available": "2011-12-15T22:42:45Z"
- },
- ]
- },
- ],
- "absolute": {
- "maxServerMeta": 1,
- "maxImageMeta": 1,
- "maxPersonality": 5,
- "maxPersonalitySize": 10240
- }
- }
- }
+ fixture = {"limits": {
+ "rate": [{
+ "uri": "*",
+ "regex": ".*",
+ "limit": [{
+ "value": 10,
+ "verb": "POST",
+ "remaining": 2,
+ "unit": "MINUTE",
+ "next-available": "2011-12-15T22:42:45Z"}]},
+ {"uri": "*/servers",
+ "regex": "^/servers",
+ "limit": [{
+ "value": 50,
+ "verb": "POST",
+ "remaining": 10,
+ "unit": "DAY",
+ "next-available": "2011-12-15T22:42:45Z"}]}],
+ "absolute": {"maxServerMeta": 1,
+ "maxImageMeta": 1,
+ "maxPersonality": 5,
+ "maxPersonalitySize": 10240}}}
output = serializer.serialize(fixture, 'index')
actual = minidom.parseString(output.replace(" ", ""))
@@ -1083,12 +1036,9 @@ class LimitsXMLSerializationTest(test.TestCase):
def test_index_no_limits(self):
serializer = limits.LimitsXMLSerializer()
- fixture = {
- "limits": {
- "rate": [],
- "absolute": {},
- }
- }
+ fixture = {"limits": {
+ "rate": [],
+ "absolute": {}}}
output = serializer.serialize(fixture, 'index')
actual = minidom.parseString(output.replace(" ", ""))
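
The view-builder fixtures above pair the raw epoch resetTime 1311272226 with
the rendered "next-available" string. The conversion is plain UTC formatting;
a stdlib sketch (the view builder's own helper may differ):

    import datetime

    reset_time = 1311272226
    iso = datetime.datetime.utcfromtimestamp(reset_time).strftime(
        '%Y-%m-%dT%H:%M:%SZ')
    assert iso == '2011-07-21T18:17:06Z'  # matches the fixture above
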
diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py
index 717e11c00..687a19390 100644
--- a/nova/tests/api/openstack/test_server_actions.py
+++ b/nova/tests/api/openstack/test_server_actions.py
@@ -352,7 +352,7 @@ class ServerActionsTest(test.TestCase):
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
- self.assertEqual(501, response.status_int)
+ self.assertEqual(400, response.status_int)
def test_create_backup_with_metadata(self):
self.flags(allow_admin_api=True)
@@ -487,6 +487,24 @@ class ServerActionsTestV11(test.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
+ def test_server_bad_body(self):
+ body = {}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_unknown_action(self):
+ body = {'sockTheFox': {'fakekey': '1234'}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_server_change_password(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
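
Both hunks above move malformed or unrecognized action bodies from 501 to
400: a bad request body is a client error, not an unimplemented server
feature. A sketch of dispatch logic consistent with that expectation,
hypothetical rather than the controller's actual code:

    import webob.exc

    def dispatch_action(actions, body):
        # Empty bodies and unknown action keys now yield 400, not 501.
        if not body or len(body) != 1:
            raise webob.exc.HTTPBadRequest()
        name = list(body.keys())[0]
        if name not in actions:
            raise webob.exc.HTTPBadRequest()
        return actions[name](body[name])
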
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index b6342ae2f..a510d7d97 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -134,8 +134,8 @@ def return_security_group(context, instance_id, security_group_id):
pass
-def instance_update(context, instance_id, kwargs):
- return stub_instance(instance_id)
+def instance_update(context, instance_id, values):
+ return stub_instance(instance_id, name=values.get('display_name'))
def instance_addresses(context, instance_id):
@@ -145,7 +145,7 @@ def instance_addresses(context, instance_id):
def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
public_addresses=None, host=None, power_state=0,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
- flavor_id="1", interfaces=None):
+ flavor_id="1", interfaces=None, name=None):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@@ -161,7 +161,7 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
host = str(host)
# ReservationID isn't sent back, hack it in there.
- server_name = "server%s" % id
+ server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
@@ -1653,6 +1653,22 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+ def test_create_instance_v1_1_invalid_flavor_id_int(self):
+ self._setup_for_create_instance()
+
+ image_href = 'http://localhost/v1.1/images/2'
+ flavor_ref = -1
+ body = dict(server=dict(
+ name='server_test', imageRef=image_href, flavorRef=flavor_ref,
+ metadata={'hello': 'world', 'open': 'stack'},
+ personality={}))
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_create_instance_v1_1_bad_flavor_href(self):
self._setup_for_create_instance()
@@ -1864,13 +1880,17 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 400)
def test_update_server_name_v1_1(self):
+ self.stubs.Set(nova.db.api, 'instance_get',
+ return_server_with_attributes(name='server_test'))
req = webob.Request.blank('/v1.1/servers/1')
req.method = 'PUT'
req.content_type = 'application/json'
- req.body = json.dumps({'server': {'name': 'new-name'}})
+ req.body = json.dumps({'server': {'name': 'server_test'}})
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 204)
- self.assertEqual(res.body, '')
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
def test_update_server_adminPass_ignored_v1_1(self):
inst_dict = dict(name='server_test', adminPass='bacon')
@@ -1881,16 +1901,19 @@ class ServersTest(test.TestCase):
self.assertEqual(params, filtered_dict)
return filtered_dict
- self.stubs.Set(nova.db.api, 'instance_update',
- server_update)
+ self.stubs.Set(nova.db.api, 'instance_update', server_update)
+ self.stubs.Set(nova.db.api, 'instance_get',
+ return_server_with_attributes(name='server_test'))
req = webob.Request.blank('/v1.1/servers/1')
req.method = 'PUT'
req.content_type = "application/json"
req.body = self.body
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 204)
- self.assertEqual(res.body, '')
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
def test_create_backup_schedules(self):
req = webob.Request.blank('/v1.0/servers/1/backup_schedule')
@@ -3033,8 +3056,7 @@ class ServersViewBuilderV11Test(test.TestCase):
address_builder,
flavor_builder,
image_builder,
- base_url,
- )
+ base_url)
return view_builder
def test_build_server(self):
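
The update tests above switch PUT /v1.1/servers/<id> from an empty 204 to a
200 carrying the updated server, which is why the instance_get stub must now
return the renamed instance. Roughly, a caller now does (sketch; in these
tests the request is dispatched against fakes.wsgi_app()):

    import json
    import webob

    req = webob.Request.blank('/v1.1/servers/1')
    req.method = 'PUT'
    req.content_type = 'application/json'
    req.body = json.dumps({'server': {'name': 'server_test'}})
    # res = req.get_response(app); json.loads(res.body)['server']['name']
    # now reflects the name sent in the PUT body.
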
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index d51b19ccd..f2a19f22d 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -32,6 +32,7 @@ class FakeGlance(object):
IMAGE_RAMDISK = 3
IMAGE_RAW = 4
IMAGE_VHD = 5
+ IMAGE_ISO = 6
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
@@ -58,6 +59,11 @@ class FakeGlance(object):
'image_meta': {'name': 'fakevhd', 'size': 0,
'disk_format': 'vhd',
'container_format': 'ovf'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_ISO: {
+ 'image_meta': {'name': 'fakeiso', 'size': 0,
+ 'disk_format': 'iso',
+ 'container_format': 'bare'},
'image_data': StringIO.StringIO('')}}
def __init__(self, host, port=None, use_ssl=False, auth_tok=None):
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 150279a95..725f6d529 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -27,6 +27,7 @@ LOG = logging.getLogger('nova.tests.integrated')
class ServersTest(integrated_helpers._IntegratedTestBase):
+
def test_get_servers(self):
"""Simple check that listing servers works."""
servers = self.api.get_servers()
@@ -103,6 +104,10 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
#self.assertEqual('available', found_server['status'])
+ servers = self.api.get_servers(detail=True)
+ for server in servers:
+ self.assertTrue("image" in server)
+ self.assertTrue("flavor" in server)
self._delete_server(created_server_id)
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py
index 788efca52..f4f5cc233 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_abstract_scheduler.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Tests For Zone Aware Scheduler.
+Tests For Abstract Scheduler.
"""
import json
@@ -25,7 +25,7 @@ from nova import rpc
from nova import test
from nova.compute import api as compute_api
from nova.scheduler import driver
-from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import abstract_scheduler
from nova.scheduler import zone_manager
@@ -60,7 +60,7 @@ def fake_zone_manager_service_states(num_hosts):
return states
-class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler):
# No need to stub anything at the moment
pass
@@ -161,15 +161,15 @@ def fake_zone_get_all(context):
]
-class ZoneAwareSchedulerTestCase(test.TestCase):
- """Test case for Zone Aware Scheduler."""
+class AbstractSchedulerTestCase(test.TestCase):
+ """Test case for Abstract Scheduler."""
- def test_zone_aware_scheduler(self):
+ def test_abstract_scheduler(self):
"""
Create a nested set of FakeZones, try to build multiple instances
and ensure that a select call returns the appropriate build plan.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
@@ -194,7 +194,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
properly adjusted based on the scale/offset in the zone
db entries.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
child_results = fake_call_zone_method(None, None, None, None)
zones = fake_zone_get_all(None)
sched._adjust_child_weights(child_results, zones)
@@ -209,11 +209,11 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
if zone == 'zone3': # Scale x1000
self.assertEqual(scaled.pop(0), w)
- def test_empty_zone_aware_scheduler(self):
+ def test_empty_abstract_scheduler(self):
"""
Ensure empty hosts & child_zones result in NoValidHosts exception.
"""
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
@@ -231,7 +231,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
If the zone_blob hint was passed in, don't re-schedule.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
request_spec = {
@@ -248,7 +248,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_local(self):
"""Provision a resource locally or remotely."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource_locally',
fake_provision_resource_locally)
@@ -260,7 +260,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_remote(self):
"""Provision a resource locally or remotely."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource_from_blob',
fake_provision_resource_from_blob)
@@ -272,9 +272,9 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_provision_resource_from_blob_empty(self):
"""Provision a resource locally or remotely given no hints."""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
request_spec = {}
- self.assertRaises(zone_aware_scheduler.InvalidBlob,
+ self.assertRaises(abstract_scheduler.InvalidBlob,
sched._provision_resource_from_blob,
None, {}, 1, {}, {})
@@ -283,7 +283,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
Provision a resource locally or remotely when blob hint passed in.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
def fake_create_db_entry_for_new_instance(self, context,
@@ -317,7 +317,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
passed in.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_decrypt_blob',
fake_decrypt_blob_returns_child_info)
was_called = False
@@ -336,7 +336,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
from an immediate child.
"""
global was_called
- sched = FakeZoneAwareScheduler()
+ sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
fake_ask_child_zone_to_create_instance)
@@ -350,14 +350,14 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_decrypt_blob(self):
"""Test that the decrypt method works."""
- fixture = FakeZoneAwareScheduler()
+ fixture = FakeAbstractScheduler()
test_data = {"foo": "bar"}
class StubDecryptor(object):
def decryptor(self, key):
return lambda blob: blob
- self.stubs.Set(zone_aware_scheduler, 'crypto',
+ self.stubs.Set(abstract_scheduler, 'crypto',
StubDecryptor())
self.assertEqual(fixture._decrypt_blob(test_data),
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index fbe6b2f77..de7581d0a 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -18,7 +18,7 @@ Tests For Least Cost Scheduler
from nova import test
from nova.scheduler import least_cost
-from nova.tests.scheduler import test_zone_aware_scheduler
+from nova.tests.scheduler import test_abstract_scheduler
MB = 1024 * 1024
@@ -70,7 +70,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
zone_manager = FakeZoneManager()
- states = test_zone_aware_scheduler.fake_zone_manager_service_states(
+ states = test_abstract_scheduler.fake_zone_manager_service_states(
num_hosts=10)
zone_manager.service_states = states
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 7a26fd1bb..158df2a27 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -36,8 +36,9 @@ from nova import test
from nova import rpc
from nova import utils
from nova.scheduler import api
-from nova.scheduler import manager
from nova.scheduler import driver
+from nova.scheduler import manager
+from nova.scheduler import multi
from nova.compute import power_state
@@ -256,7 +257,9 @@ class SimpleDriverTestCase(test.TestCase):
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 1
+        # NOTE(jk0): If an integer is passed as the image_ref, the default
+        # image service is used to look it up (in this case, the fake).
+ inst['image_ref'] = '1'
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
@@ -391,7 +394,7 @@ class SimpleDriverTestCase(test.TestCase):
compute1.kill()
compute2.kill()
- def test_wont_sechedule_if_specified_host_is_down_no_queue(self):
+ def test_wont_schedule_if_specified_host_is_down_no_queue(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
@@ -643,10 +646,13 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
driver_i._live_migration_src_check(nocare, nocare)
- driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
- driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
+ driver_i._live_migration_dest_check(nocare, nocare,
+ i_ref['host'], False)
+ driver_i._live_migration_common_check(nocare, nocare,
+ i_ref['host'], False)
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
+ kwargs = {'instance_id': instance_id, 'dest': i_ref['host'],
+ 'block_migration': False}
rpc.cast(self.context,
db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
{"method": 'live_migration', "args": kwargs})
@@ -654,7 +660,8 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.ReplayAll()
self.scheduler.live_migration(self.context, FLAGS.compute_topic,
instance_id=instance_id,
- dest=i_ref['host'])
+ dest=i_ref['host'],
+ block_migration=False)
i_ref = db.instance_get(self.context, instance_id)
self.assertTrue(i_ref['state_description'] == 'migrating')
@@ -735,7 +742,7 @@ class SimpleDriverTestCase(test.TestCase):
self.assertRaises(exception.ComputeServiceUnavailable,
self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, i_ref['host'])
+ self.context, i_ref, i_ref['host'], False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -748,7 +755,7 @@ class SimpleDriverTestCase(test.TestCase):
self.assertRaises(exception.UnableToMigrateToSelf,
self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, i_ref['host'])
+ self.context, i_ref, i_ref['host'], False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -756,15 +763,33 @@ class SimpleDriverTestCase(test.TestCase):
def test_live_migration_dest_check_service_lack_memory(self):
"""Confirms exception raises when dest doesn't have enough memory."""
instance_id = self._create_instance()
+ instance_id2 = self._create_instance(host='somewhere',
+ memory_mb=12)
i_ref = db.instance_get(self.context, instance_id)
- s_ref = self._create_compute_service(host='somewhere',
- memory_mb_used=12)
+ s_ref = self._create_compute_service(host='somewhere')
self.assertRaises(exception.MigrationError,
self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, 'somewhere')
+ self.context, i_ref, 'somewhere', False)
db.instance_destroy(self.context, instance_id)
+ db.instance_destroy(self.context, instance_id2)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_block_migration_dest_check_service_lack_disk(self):
+ """Confirms exception raises when dest doesn't have enough disk."""
+ instance_id = self._create_instance()
+ instance_id2 = self._create_instance(host='somewhere',
+ local_gb=70)
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere')
+
+ self.assertRaises(exception.MigrationError,
+ self.scheduler.driver._live_migration_dest_check,
+ self.context, i_ref, 'somewhere', True)
+
+ db.instance_destroy(self.context, instance_id)
+ db.instance_destroy(self.context, instance_id2)
db.service_destroy(self.context, s_ref['id'])
def test_live_migration_dest_check_service_works_correctly(self):
@@ -776,7 +801,8 @@ class SimpleDriverTestCase(test.TestCase):
ret = self.scheduler.driver._live_migration_dest_check(self.context,
i_ref,
- 'somewhere')
+ 'somewhere',
+ False)
self.assertTrue(ret is None)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -809,9 +835,10 @@ class SimpleDriverTestCase(test.TestCase):
"args": {'filename': fpath}})
self.mox.ReplayAll()
- self.assertRaises(exception.SourceHostUnavailable,
+        self.assertRaises(exception.FileNotFound,
self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest)
+ self.context, i_ref, dest, False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -835,7 +862,7 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest)
+ self.context, i_ref, dest, False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -861,7 +888,7 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest)
+ self.context, i_ref, dest, False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -893,7 +920,8 @@ class SimpleDriverTestCase(test.TestCase):
try:
self.scheduler.driver._live_migration_common_check(self.context,
i_ref,
- dest)
+ dest,
+ False)
except rpc.RemoteError, e:
c = (e.message.find(_("doesn't have compatibility to")) >= 0)
@@ -903,6 +931,25 @@ class SimpleDriverTestCase(test.TestCase):
db.service_destroy(self.context, s_ref2['id'])
+class MultiDriverTestCase(SimpleDriverTestCase):
+ """Test case for multi driver."""
+
+ def setUp(self):
+ super(MultiDriverTestCase, self).setUp()
+ self.flags(connection_type='fake',
+ stub_network=True,
+ max_cores=4,
+ max_gigabytes=4,
+ network_manager='nova.network.manager.FlatManager',
+ volume_driver='nova.volume.driver.FakeISCSIDriver',
+ compute_scheduler_driver=('nova.scheduler.simple'
+ '.SimpleScheduler'),
+ volume_scheduler_driver=('nova.scheduler.simple'
+ '.SimpleScheduler'),
+ scheduler_driver='nova.scheduler.multi.MultiScheduler')
+ self.scheduler = manager.SchedulerManager()
+
+
class FakeZone(object):
def __init__(self, id, api_url, username, password):
self.id = id
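
The new MultiDriverTestCase reruns the whole simple-driver suite with
nova.scheduler.multi.MultiScheduler on top, which delegates compute and
volume scheduling to the per-topic drivers named in the flags. A rough
sketch of that delegation pattern (hypothetical class, not the real one in
nova/scheduler/multi.py):

    class MultiSchedulerSketch(object):
        """Route schedule calls to a per-topic sub-scheduler."""

        def __init__(self, compute_driver, volume_driver):
            self.drivers = {'compute': compute_driver,
                            'volume': volume_driver}

        def schedule(self, context, topic, *args, **kwargs):
            # Let the sub-scheduler registered for this topic pick a host.
            return self.drivers[topic].schedule(context, topic,
                                                *args, **kwargs)
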
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 73c9bd78d..4f5d36f14 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -632,7 +632,7 @@ class ComputeTestCase(test.TestCase):
vid = i_ref['volumes'][i]['id']
volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
drivermock.plug_vifs(i_ref, [])
- drivermock.ensure_filtering_rules_for_instance(i_ref)
+ drivermock.ensure_filtering_rules_for_instance(i_ref, [])
self.compute.db = dbmock
self.compute.volume_manager = volmock
@@ -657,7 +657,7 @@ class ComputeTestCase(test.TestCase):
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
drivermock.plug_vifs(i_ref, [])
- drivermock.ensure_filtering_rules_for_instance(i_ref)
+ drivermock.ensure_filtering_rules_for_instance(i_ref, [])
self.compute.db = dbmock
self.compute.driver = drivermock
@@ -714,11 +714,15 @@ class ComputeTestCase(test.TestCase):
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
- "args": {'instance_id': i_ref['id']}})
+ "args": {'instance_id': i_ref['id'],
+ 'block_migration': False,
+ 'disk': None}})
+
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
- self.compute.recover_live_migration)
+ self.compute.rollback_live_migration,
+ False)
self.compute.db = dbmock
self.mox.ReplayAll()
@@ -739,13 +743,18 @@ class ComputeTestCase(test.TestCase):
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
- "args": {'instance_id': i_ref['id']}}).\
+ "args": {'instance_id': i_ref['id'],
+ 'block_migration': False,
+ 'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+ # mock for volume_api.remove_from_compute
+ rpc.call(c, topic, {"method": "remove_volume",
+ "args": {'volume_id': v['id']}})
self.compute.db = dbmock
self.mox.ReplayAll()
@@ -766,7 +775,9 @@ class ComputeTestCase(test.TestCase):
AndReturn(topic)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, topic, {"method": "pre_live_migration",
- "args": {'instance_id': i_ref['id']}}).\
+ "args": {'instance_id': i_ref['id'],
+ 'block_migration': False,
+ 'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
@@ -791,11 +802,14 @@ class ComputeTestCase(test.TestCase):
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
- "args": {'instance_id': i_ref['id']}})
+ "args": {'instance_id': i_ref['id'],
+ 'block_migration': False,
+ 'disk': None}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
- self.compute.recover_live_migration)
+ self.compute.rollback_live_migration,
+ False)
self.compute.db = dbmock
self.mox.ReplayAll()
@@ -829,6 +843,10 @@ class ComputeTestCase(test.TestCase):
self.compute.volume_manager.remove_compute_volume(c, v['id'])
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref, [])
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
+ {"method": "post_live_migration_at_destination",
+ "args": {'instance_id': i_ref['id'], 'block_migration': False}})
# executing
self.mox.ReplayAll()
@@ -1323,6 +1341,69 @@ class ComputeTestCase(test.TestCase):
db.instance_destroy(c, instance_id2)
db.instance_destroy(c, instance_id3)
+ def test_get_all_by_metadata(self):
+ """Test searching instances by metadata"""
+
+ c = context.get_admin_context()
+ instance_id0 = self._create_instance()
+ instance_id1 = self._create_instance({
+ 'metadata': {'key1': 'value1'}})
+ instance_id2 = self._create_instance({
+ 'metadata': {'key2': 'value2'}})
+ instance_id3 = self._create_instance({
+ 'metadata': {'key3': 'value3'}})
+ instance_id4 = self._create_instance({
+ 'metadata': {'key3': 'value3',
+ 'key4': 'value4'}})
+
+ # get all instances
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {}})
+ self.assertEqual(len(instances), 5)
+
+ # wrong key/value combination
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key1': 'value3'}})
+ self.assertEqual(len(instances), 0)
+
+ # non-existing keys
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key5': 'value1'}})
+ self.assertEqual(len(instances), 0)
+
+ # find existing instance
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key2': 'value2'}})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].id, instance_id2)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key3': 'value3'}})
+ self.assertEqual(len(instances), 2)
+ instance_ids = [instance.id for instance in instances]
+ self.assertTrue(instance_id3 in instance_ids)
+ self.assertTrue(instance_id4 in instance_ids)
+
+        # multiple criteria as a dict
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': {'key3': 'value3',
+ 'key4': 'value4'}})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].id, instance_id4)
+
+        # multiple criteria as a list
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': [{'key4': 'value4'},
+ {'key3': 'value3'}]})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].id, instance_id4)
+
+ db.instance_destroy(c, instance_id0)
+ db.instance_destroy(c, instance_id1)
+ db.instance_destroy(c, instance_id2)
+ db.instance_destroy(c, instance_id3)
+ db.instance_destroy(c, instance_id4)
+
@staticmethod
def _parse_db_block_device_mapping(bdm_ref):
attr_list = ('delete_on_termination', 'device_name', 'no_device',
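
test_get_all_by_metadata above fixes the search semantics: an empty dict
matches every instance, a dict of pairs must all match, and a list of dicts
is folded together with the same AND behavior. A small self-contained sketch
of that matching (hypothetical helper; the real query building lives in
nova/db/sqlalchemy/api.py):

    def matches_metadata(instance_meta, search_meta):
        # Fold a list of dicts into one dict; a plain dict passes through.
        if isinstance(search_meta, list):
            folded = {}
            for item in search_meta:
                folded.update(item)
            search_meta = folded
        # Every requested pair must be present; empty search matches all.
        return all(instance_meta.get(k) == v
                   for k, v in search_meta.items())

    assert matches_metadata({'key3': 'value3', 'key4': 'value4'},
                            [{'key4': 'value4'}, {'key3': 'value3'}])
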
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 2180cf4f0..688518bb8 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -21,6 +21,7 @@ import os
import re
import shutil
import sys
+import tempfile
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
@@ -49,18 +50,19 @@ def _create_network_info(count=1, ipv6=None):
if ipv6 is None:
ipv6 = FLAGS.use_ipv6
fake = 'fake'
- fake_ip = '0.0.0.0/0'
- fake_ip_2 = '0.0.0.1/0'
- fake_ip_3 = '0.0.0.1/0'
+ fake_ip = '10.11.12.13'
+ fake_ip_2 = '0.0.0.1'
+ fake_ip_3 = '0.0.0.1'
fake_vlan = 100
fake_bridge_interface = 'eth0'
network = {'bridge': fake,
'cidr': fake_ip,
'cidr_v6': fake_ip,
+ 'gateway_v6': fake,
'vlan': fake_vlan,
'bridge_interface': fake_bridge_interface}
mapping = {'mac': fake,
- 'dhcp_server': fake,
+ 'dhcp_server': '10.0.0.1',
'gateway': fake,
'gateway6': fake,
'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
@@ -71,12 +73,12 @@ def _create_network_info(count=1, ipv6=None):
return [(network, mapping) for x in xrange(0, count)]
-def _setup_networking(instance_id, ip='1.2.3.4'):
+def _setup_networking(instance_id, ip='1.2.3.4', mac='56:12:12:12:12:12'):
ctxt = context.get_admin_context()
network_ref = db.project_get_networks(ctxt,
'fake',
associate=True)[0]
- vif = {'address': '56:12:12:12:12:12',
+ vif = {'address': mac,
'network_id': network_ref['id'],
'instance_id': instance_id}
vif_ref = db.virtual_interface_create(ctxt, vif)
@@ -273,15 +275,14 @@ class LibvirtConnTestCase(test.TestCase):
conn = connection.LibvirtConnection(True)
instance_ref = db.instance_create(self.context, self.test_instance)
- result = conn._prepare_xml_info(instance_ref, False)
- self.assertFalse(result['nics'])
-
- result = conn._prepare_xml_info(instance_ref, False,
- _create_network_info())
+ result = conn._prepare_xml_info(instance_ref,
+ _create_network_info(),
+ False)
self.assertTrue(len(result['nics']) == 1)
- result = conn._prepare_xml_info(instance_ref, False,
- _create_network_info(2))
+ result = conn._prepare_xml_info(instance_ref,
+ _create_network_info(2),
+ False)
self.assertTrue(len(result['nics']) == 2)
def test_xml_and_uri_no_ramdisk_no_kernel(self):
@@ -408,16 +409,16 @@ class LibvirtConnTestCase(test.TestCase):
network_info = _create_network_info(2)
conn = connection.LibvirtConnection(True)
instance_ref = db.instance_create(self.context, instance_data)
- xml = conn.to_xml(instance_ref, False, network_info)
+ xml = conn.to_xml(instance_ref, network_info, False)
tree = xml_to_tree(xml)
interfaces = tree.findall("./devices/interface")
self.assertEquals(len(interfaces), 2)
parameters = interfaces[0].findall('./filterref/parameter')
self.assertEquals(interfaces[0].get('type'), 'bridge')
self.assertEquals(parameters[0].get('name'), 'IP')
- self.assertEquals(parameters[0].get('value'), '0.0.0.0/0')
+ self.assertEquals(parameters[0].get('value'), '10.11.12.13')
self.assertEquals(parameters[1].get('name'), 'DHCPSERVER')
- self.assertEquals(parameters[1].get('value'), 'fake')
+ self.assertEquals(parameters[1].get('value'), '10.0.0.1')
def _check_xml_and_container(self, instance):
user_context = context.RequestContext(self.user_id,
@@ -431,7 +432,8 @@ class LibvirtConnTestCase(test.TestCase):
uri = conn.get_uri()
self.assertEquals(uri, 'lxc:///')
- xml = conn.to_xml(instance_ref)
+ network_info = _create_network_info()
+ xml = conn.to_xml(instance_ref, network_info)
tree = xml_to_tree(xml)
check = [
@@ -528,17 +530,20 @@ class LibvirtConnTestCase(test.TestCase):
uri = conn.get_uri()
self.assertEquals(uri, expected_uri)
- xml = conn.to_xml(instance_ref, rescue)
+ network_info = _create_network_info()
+ xml = conn.to_xml(instance_ref, network_info, rescue)
tree = xml_to_tree(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
- '%s failed check %d' % (xml, i))
+ '%s != %s failed check %d' %
+ (check(tree), expected_result, i))
for i, (check, expected_result) in enumerate(common_checks):
self.assertEqual(check(tree),
expected_result,
- '%s failed common check %d' % (xml, i))
+ '%s != %s failed common check %d' %
+ (check(tree), expected_result, i))
# This test is supposed to make sure we don't
# override a specifically set uri
@@ -623,7 +628,7 @@ class LibvirtConnTestCase(test.TestCase):
return
# Preparing mocks
- def fake_none(self):
+ def fake_none(self, *args):
return
def fake_raise(self):
@@ -640,6 +645,7 @@ class LibvirtConnTestCase(test.TestCase):
self.create_fake_libvirt_mock()
instance_ref = db.instance_create(self.context, self.test_instance)
+ network_info = _create_network_info()
# Start test
self.mox.ReplayAll()
@@ -649,6 +655,7 @@ class LibvirtConnTestCase(test.TestCase):
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
conn.firewall_driver.setattr('instance_filter_exists', fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref,
+ network_info,
time=fake_timer)
except exception.Error, e:
c1 = (0 <= e.message.find('Timeout migrating for'))
@@ -690,17 +697,20 @@ class LibvirtConnTestCase(test.TestCase):
return vdmock
self.create_fake_libvirt_mock(lookupByName=fake_lookup)
- self.mox.StubOutWithMock(self.compute, "recover_live_migration")
- self.compute.recover_live_migration(self.context, instance_ref,
- dest='dest')
-
- # Start test
+        self.mox.StubOutWithMock(self.compute, "rollback_live_migration")
+        self.compute.rollback_live_migration(self.context, instance_ref,
+                                             'dest', False)
+
+        # Start test
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
- self.context, instance_ref, 'dest', '',
- self.compute.recover_live_migration)
+ self.context, instance_ref, 'dest', False,
+ self.compute.rollback_live_migration)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertTrue(instance_ref['state_description'] == 'running')
@@ -711,6 +721,95 @@ class LibvirtConnTestCase(test.TestCase):
db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['id'])
+ def test_pre_block_migration_works_correctly(self):
+ """Confirms pre_block_migration works correctly."""
+
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Replace instances_path since this testcase creates tmpfile
+ tmpdir = tempfile.mkdtemp()
+ store = FLAGS.instances_path
+ FLAGS.instances_path = tmpdir
+
+ # Test data
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ dummyjson = '[{"path": "%s/disk", "local_gb": "10G", "type": "raw"}]'
+
+ # Preparing mocks
+        # qemu-img should be mocked since the test environment might not
+        # have enough disk space.
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw',
+ '%s/%s/disk' % (tmpdir, instance_ref.name), '10G')
+
+ self.mox.ReplayAll()
+ conn = connection.LibvirtConnection(False)
+ conn.pre_block_migration(self.context, instance_ref,
+ dummyjson % tmpdir)
+
+ self.assertTrue(os.path.exists('%s/%s/' %
+ (tmpdir, instance_ref.name)))
+
+ shutil.rmtree(tmpdir)
+ db.instance_destroy(self.context, instance_ref['id'])
+ # Restore FLAGS.instances_path
+ FLAGS.instances_path = store
+
+ def test_get_instance_disk_info_works_correctly(self):
+ """Confirms pre_block_migration works correctly."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Test data
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ ret = ("image: /test/disk\nfile format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n")
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref.name:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+        # Based on the test data above, one disk is raw, so getsize is mocked.
+ os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024)
+        # The other disk is qcow2, so qemu-img is mocked.
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('sudo', 'qemu-img', 'info', '/test/disk.local').\
+ AndReturn((ret, ''))
+
+ self.mox.ReplayAll()
+ conn = connection.LibvirtConnection(False)
+ info = conn.get_instance_disk_info(self.context, instance_ref)
+ info = utils.loads(info)
+
+ self.assertTrue(info[0]['type'] == 'raw' and
+ info[1]['type'] == 'qcow2' and
+ info[0]['path'] == '/test/disk' and
+ info[1]['path'] == '/test/disk.local' and
+ info[0]['local_gb'] == '10G' and
+ info[1]['local_gb'] == '20G')
+
+ db.instance_destroy(self.context, instance_ref['id'])
+
def test_spawn_with_network_info(self):
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
@@ -884,7 +983,11 @@ class IptablesFirewallTestCase(test.TestCase):
def test_static_filters(self):
instance_ref = self._create_instance_ref()
- _setup_networking(instance_ref['id'], self.test_ip)
+ src_instance_ref = self._create_instance_ref()
+ src_ip = '10.11.12.14'
+ src_mac = '56:12:12:12:12:13'
+ _setup_networking(instance_ref['id'], self.test_ip, src_mac)
+ _setup_networking(src_instance_ref['id'], src_ip)
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
@@ -893,6 +996,12 @@ class IptablesFirewallTestCase(test.TestCase):
'name': 'testgroup',
'description': 'test group'})
+ src_secgroup = db.security_group_create(admin_ctxt,
+ {'user_id': 'fake',
+ 'project_id': 'fake',
+ 'name': 'testsourcegroup',
+ 'description': 'src group'})
+
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
@@ -914,9 +1023,19 @@ class IptablesFirewallTestCase(test.TestCase):
'to_port': 81,
'cidr': '192.168.10.0/24'})
+ db.security_group_rule_create(admin_ctxt,
+ {'parent_group_id': secgroup['id'],
+ 'protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 81,
+ 'group_id': src_secgroup['id']})
+
db.instance_add_security_group(admin_ctxt, instance_ref['id'],
secgroup['id'])
+ db.instance_add_security_group(admin_ctxt, src_instance_ref['id'],
+ src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
+ src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(*cmd, **kwargs):
@@ -942,8 +1061,9 @@ class IptablesFirewallTestCase(test.TestCase):
from nova.network import linux_net
linux_net.iptables_manager.execute = fake_iptables_execute
- self.fw.prepare_instance_filter(instance_ref)
- self.fw.apply_instance_filter(instance_ref)
+ network_info = _create_network_info()
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
in_rules = filter(lambda l: not l.startswith('#'),
self.in_filter_rules)
@@ -969,17 +1089,22 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
- regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
+ regex = re.compile('-A .* -j ACCEPT -p icmp -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
- regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
- '--icmp-type 8 -j ACCEPT')
+ regex = re.compile('-A .* -j ACCEPT -p icmp -m icmp --icmp-type 8'
+ ' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
- regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
- '--dports 80:81 -j ACCEPT')
+ regex = re.compile('-A .* -j ACCEPT -p tcp -m multiport '
+ '--dports 80:81 -s %s' % (src_ip,))
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
+ "TCP port 80/81 acceptance rule wasn't added")
+
+ regex = re.compile('-A .* -j ACCEPT -p tcp '
+ '-m multiport --dports 80:81 -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])
@@ -1008,7 +1133,7 @@ class IptablesFirewallTestCase(test.TestCase):
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
- self.fw.add_filters_for_instance(instance_ref, network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
@@ -1023,7 +1148,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.StubOutWithMock(self.fw,
'add_filters_for_instance',
use_mock_anything=True)
- self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg())
+ self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
self.fw.instances[instance_ref['id']] = instance_ref
self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake")
@@ -1043,11 +1168,12 @@ class IptablesFirewallTestCase(test.TestCase):
instance_ref = self._create_instance_ref()
_setup_networking(instance_ref['id'], self.test_ip)
- self.fw.setup_basic_filtering(instance_ref)
- self.fw.prepare_instance_filter(instance_ref)
- self.fw.apply_instance_filter(instance_ref)
+ network_info = _create_network_info()
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
original_filter_count = len(fakefilter.filters)
- self.fw.unfilter_instance(instance_ref)
+ self.fw.unfilter_instance(instance_ref, network_info)
# should undefine just the instance filter
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
@@ -1057,14 +1183,14 @@ class IptablesFirewallTestCase(test.TestCase):
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
- nw_info = _create_network_info(1)
_setup_networking(instance_ref['id'], self.test_ip)
# FRAGILE: peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
# create a firewall via setup_basic_filtering like libvirt_conn.spawn
# should have a chain with 0 rules
- self.fw.setup_basic_filtering(instance_ref, network_info=nw_info)
+ network_info = _create_network_info(1)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
@@ -1094,8 +1220,8 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
- self.fw.prepare_instance_filter(instance_ref, network_info=nw_info)
- self.fw.apply_instance_filter(instance_ref)
+ self.fw.prepare_instance_filter(instance_ref, network_info)
+ self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
@@ -1247,7 +1373,7 @@ class NWFilterTestCase(test.TestCase):
def _ensure_all_called():
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
- '561212121212')
+ 'fake')
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
for required in [secgroup_filter, 'allow-dhcp-server',
'no-arp-spoofing', 'no-ip-spoofing',
@@ -1263,9 +1389,10 @@ class NWFilterTestCase(test.TestCase):
self.security_group.id)
instance = db.instance_get(self.context, inst_id)
- self.fw.setup_basic_filtering(instance)
- self.fw.prepare_instance_filter(instance)
- self.fw.apply_instance_filter(instance)
+ network_info = _create_network_info()
+ self.fw.setup_basic_filtering(instance, network_info)
+ self.fw.prepare_instance_filter(instance, network_info)
+ self.fw.apply_instance_filter(instance, network_info)
_ensure_all_called()
self.teardown_security_group()
db.instance_destroy(context.get_admin_context(), instance_ref['id'])
@@ -1296,11 +1423,12 @@ class NWFilterTestCase(test.TestCase):
instance = db.instance_get(self.context, inst_id)
_setup_networking(instance_ref['id'], self.test_ip)
- self.fw.setup_basic_filtering(instance)
- self.fw.prepare_instance_filter(instance)
- self.fw.apply_instance_filter(instance)
+ network_info = _create_network_info()
+ self.fw.setup_basic_filtering(instance, network_info)
+ self.fw.prepare_instance_filter(instance, network_info)
+ self.fw.apply_instance_filter(instance, network_info)
original_filter_count = len(fakefilter.filters)
- self.fw.unfilter_instance(instance)
+ self.fw.unfilter_instance(instance, network_info)
# should undefine 2 filters: instance and instance-secgroup
self.assertEqual(original_filter_count - len(fakefilter.filters), 2)
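
test_get_instance_disk_info_works_correctly above feeds the driver a canned
qemu-img info result and expects local_gb of '20G' for the qcow2 disk.
Extracting that figure is a one-line parse of the "virtual size" line; a
sketch under that assumption (the driver's own parsing may differ):

    import re

    QEMU_IMG_INFO = ("image: /test/disk\nfile format: raw\n"
                     "virtual size: 20G (21474836480 bytes)\n"
                     "disk size: 3.1G\n")

    def parse_virtual_size(output):
        # Grab the human-readable size token after 'virtual size:'.
        match = re.search(r'^virtual size: (\S+)', output, re.MULTILINE)
        return match.group(1) if match else None

    assert parse_virtual_size(QEMU_IMG_INFO) == '20G'
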
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index ad678714e..bfc7a6d44 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -39,7 +39,7 @@ class MetadataTestCase(test.TestCase):
'key_name': None,
'host': 'test',
'launch_index': 1,
- 'instance_type': 'm1.tiny',
+ 'instance_type': {'name': 'm1.tiny'},
'reservation_id': 'r-xxxxxxxx',
'user_data': '',
'image_ref': 7,
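
The fixture change above reflects instance_type arriving as a joined row (a
dict with a 'name' key) instead of a bare flavor-name string, so metadata
code indexes into it. Illustratively (hypothetical consumer code):

    instance = {'instance_type': {'name': 'm1.tiny'}}
    # Previously instance['instance_type'] was the string itself.
    assert instance['instance_type']['name'] == 'm1.tiny'
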
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 2ca8b64f4..0ead680ee 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -210,7 +210,11 @@ class VlanNetworkTestCase(test.TestCase):
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
+ self.mox.StubOutWithMock(db, 'instance_get')
+ db.instance_get(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({'security_groups':
+ [{'id': 0}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.1')
@@ -247,6 +251,17 @@ class CommonNetworkTestCase(test.TestCase):
return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
dict(address='10.0.0.2')]
+ def network_get_by_cidr(self, context, cidr):
+ raise exception.NetworkNotFoundForCidr()
+
+ def network_create_safe(self, context, net):
+ fakenet = dict(net)
+ fakenet['id'] = 999
+ return fakenet
+
+ def network_get_all(self, context):
+ raise exception.NoNetworksFound()
+
def __init__(self):
self.db = self.FakeDB()
self.deallocate_called = None
@@ -254,6 +269,12 @@ class CommonNetworkTestCase(test.TestCase):
def deallocate_fixed_ip(self, context, address):
self.deallocate_called = address
+ def _create_fixed_ips(self, context, network_id):
+ pass
+
+ def fake_create_fixed_ips(self, context, network_id):
+ return None
+
def test_remove_fixed_ip_from_instance(self):
manager = self.FakeNetworkManager()
manager.remove_fixed_ip_from_instance(None, 99, '10.0.0.1')
@@ -265,3 +286,180 @@ class CommonNetworkTestCase(test.TestCase):
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
None, 99, 'bad input')
+
+ def test_validate_cidrs(self):
+ manager = self.FakeNetworkManager()
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
+ False, 1, 256, None, None, None,
+ None)
+ self.assertEqual(1, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ self.assertTrue('192.168.0.0/24' in cidrs)
+
+ def test_validate_cidrs_split_exact_in_half(self):
+ manager = self.FakeNetworkManager()
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
+ False, 2, 128, None, None, None,
+ None)
+ self.assertEqual(2, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ self.assertTrue('192.168.0.0/25' in cidrs)
+ self.assertTrue('192.168.0.128/25' in cidrs)
+
+ def test_validate_cidrs_split_cidr_in_use_middle_of_range(self):
+ manager = self.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ ctxt = mox.IgnoreArg()
+ manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
+ 'cidr': '192.168.2.0/24'}])
+ self.mox.ReplayAll()
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+ False, 4, 256, None, None, None,
+ None)
+ self.assertEqual(4, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
+ '192.168.4.0/24']
+ for exp_cidr in exp_cidrs:
+ self.assertTrue(exp_cidr in cidrs)
+ self.assertFalse('192.168.2.0/24' in cidrs)
+
+ def test_validate_cidrs_smaller_subnet_in_use(self):
+ manager = self.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ ctxt = mox.IgnoreArg()
+ manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
+ 'cidr': '192.168.2.9/25'}])
+ self.mox.ReplayAll()
+ # ValueError: requested cidr (192.168.2.0/24) conflicts with
+ # existing smaller cidr
+ args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
+ None, None)
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_validate_cidrs_split_smaller_cidr_in_use(self):
+ manager = self.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ ctxt = mox.IgnoreArg()
+ manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
+ 'cidr': '192.168.2.0/25'}])
+ self.mox.ReplayAll()
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+ False, 4, 256, None, None, None, None)
+ self.assertEqual(4, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
+ '192.168.4.0/24']
+ for exp_cidr in exp_cidrs:
+ self.assertTrue(exp_cidr in cidrs)
+ self.assertFalse('192.168.2.0/24' in cidrs)
+
+ def test_validate_cidrs_split_smaller_cidr_in_use2(self):
+ manager = self.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ ctxt = mox.IgnoreArg()
+ manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
+ 'cidr': '192.168.2.9/29'}])
+ self.mox.ReplayAll()
+ nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
+ False, 3, 32, None, None, None, None)
+ self.assertEqual(3, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
+ for exp_cidr in exp_cidrs:
+ self.assertTrue(exp_cidr in cidrs)
+ self.assertFalse('192.168.2.0/27' in cidrs)
+
+ def test_validate_cidrs_split_all_in_use(self):
+ manager = self.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ ctxt = mox.IgnoreArg()
+ in_use = [{'id': 1, 'cidr': '192.168.2.9/29'},
+ {'id': 2, 'cidr': '192.168.2.64/26'},
+ {'id': 3, 'cidr': '192.168.2.128/26'}]
+ manager.db.network_get_all(ctxt).AndReturn(in_use)
+ self.mox.ReplayAll()
+ args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
+ None, None)
+        # ValueError: Not enough subnets available to satisfy requested
+        #             num_networks - some subnets in requested range
+        #             already in use
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_validate_cidrs_one_in_use(self):
+ manager = self.FakeNetworkManager()
+ args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
+ None, None)
+ # ValueError: network_size * num_networks exceeds cidr size
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_validate_cidrs_already_used(self):
+ manager = self.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ ctxt = mox.IgnoreArg()
+ manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
+ 'cidr': '192.168.0.0/24'}])
+ self.mox.ReplayAll()
+ # ValueError: cidr already in use
+ args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
+ None, None)
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_validate_cidrs_too_many(self):
+ manager = self.FakeNetworkManager()
+ args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
+ None, None)
+        # ValueError: Not enough subnets available to satisfy requested
+        #             num_networks
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_validate_cidrs_split_partial(self):
+ manager = self.FakeNetworkManager()
+ nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
+ False, 2, 256, None, None, None, None)
+ returned_cidrs = [str(net['cidr']) for net in nets]
+ self.assertTrue('192.168.0.0/24' in returned_cidrs)
+ self.assertTrue('192.168.1.0/24' in returned_cidrs)
+
+ def test_validate_cidrs_conflict_existing_supernet(self):
+ manager = self.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ ctxt = mox.IgnoreArg()
+ fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
+ manager.db.network_get_all(ctxt).AndReturn(fakecidr)
+ self.mox.ReplayAll()
+ args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
+ None, None)
+ # ValueError: requested cidr (192.168.0.0/24) conflicts
+ # with existing supernet
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_create_networks(self):
+ cidr = '192.168.0.0/24'
+ manager = self.FakeNetworkManager()
+ self.stubs.Set(manager, '_create_fixed_ips',
+ self.fake_create_fixed_ips)
+ args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
+ None]
+        result = manager.create_networks(*args)
+        self.assertTrue(result)
+
+ def test_create_networks_cidr_already_used(self):
+ manager = self.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ ctxt = mox.IgnoreArg()
+ fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}]
+ manager.db.network_get_all(ctxt).AndReturn(fakecidr)
+ self.mox.ReplayAll()
+ args = [None, 'foo', '192.168.0.0/24', None, 1, 256,
+ 'fd00::/48', None, None, None]
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_create_networks_many(self):
+ cidr = '192.168.0.0/16'
+ manager = self.FakeNetworkManager()
+ self.stubs.Set(manager, '_create_fixed_ips',
+ self.fake_create_fixed_ips)
+ args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
+ None]
+ self.assertTrue(manager.create_networks(*args))
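
The tests above pin down how create_networks carves a large CIDR into fixed-size
subnets while skipping ranges that overlap existing networks. As a rough
illustration of that validation logic (a minimal sketch using netaddr, not
Nova's actual implementation; split_cidrs is a hypothetical helper and
network_size is assumed to be a power of two):

    import netaddr

    def split_cidrs(cidr, num_networks, network_size, used_cidrs=()):
        # Prefix length for subnets holding network_size addresses.
        prefixlen = 32 - (network_size.bit_length() - 1)
        fixed_net = netaddr.IPNetwork(cidr)
        if prefixlen < fixed_net.prefixlen:
            raise ValueError('network_size * num_networks exceeds cidr size')
        used = [netaddr.IPNetwork(c) for c in used_cidrs]
        picked = []
        for subnet in fixed_net.subnet(prefixlen):
            # Skip candidates that contain, or are contained in, a used cidr.
            if any(subnet in u or u in subnet for u in used):
                continue
            picked.append(str(subnet))
            if len(picked) == num_networks:
                return picked
        raise ValueError('Not enough subnets available to satisfy '
                         'requested num_networks')

    # split_cidrs('192.168.0.0/16', 4, 256, ['192.168.2.0/24'])
    # -> ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
    #     '192.168.4.0/24'], matching the in-use test above.
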
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 1deb5a780..2f0559366 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -519,6 +519,11 @@ class XenAPIVMTestCase(test.TestCase):
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
+ def test_spawn_iso_glance(self):
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None,
+ os_type="windows", architecture="i386")
+ self.check_vm_params_for_windows()
+
def test_spawn_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 0d0f84e32..a6a1febd6 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -28,10 +28,10 @@ from nova import utils
def stubout_instance_snapshot(stubs):
@classmethod
- def fake_fetch_image(cls, context, session, instance_id, image, user,
+ def fake_fetch_image(cls, context, session, instance, image, user,
project, type):
from nova.virt.xenapi.fake import create_vdi
- name_label = "instance-%s" % instance_id
+ name_label = "instance-%s" % instance.id
#TODO: create fake SR record
sr_ref = "fakesr"
vdi_ref = create_vdi(name_label=name_label, read_only=False,
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index df4a66ac2..20af2666d 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -252,7 +252,7 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
pass
- def ensure_filtering_rules_for_instance(self, instance_ref):
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 880702af1..dc0628772 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -487,16 +487,16 @@ class FakeConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
- def ensure_filtering_rules_for_instance(self, instance_ref):
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
def live_migration(self, context, instance_ref, dest,
- post_method, recover_method):
+ post_method, recover_method, block_migration=False):
"""This method is supported only by libvirt."""
return
- def unfilter_instance(self, instance_ref, network_info=None):
+ def unfilter_instance(self, instance_ref, network_info):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 6d043577a..e8a657bac 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -32,7 +32,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
:rescue_kernel_id: Rescue aki image (default: aki-rescue).
:rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
:injected_network_template: Template file for injected network
-:allow_project_net_traffic: Whether to allow in project network traffic
+:allow_same_net_traffic: Whether to allow network traffic from the same network
"""
@@ -96,9 +96,9 @@ flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
' on libvirt_type)')
-flags.DEFINE_bool('allow_project_net_traffic',
+flags.DEFINE_bool('allow_same_net_traffic',
True,
- 'Whether to allow in project network traffic')
+                  'Whether to allow network traffic from the same network')
flags.DEFINE_bool('use_cow_images',
True,
'Whether to use cow images')
@@ -117,6 +117,10 @@ flags.DEFINE_string('live_migration_uri',
flags.DEFINE_string('live_migration_flag',
"VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
'Define live migration behavior.')
+flags.DEFINE_string('block_migration_flag',
+ "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
+ "VIR_MIGRATE_NON_SHARED_INC",
+ 'Define block migration behavior.')
flags.DEFINE_integer('live_migration_bandwidth', 0,
'Define live migration behavior')
flags.DEFINE_string('qemu_img', 'qemu-img',
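
The new block_migration_flag follows the same convention as
live_migration_flag: a comma-separated list of libvirt constant names that
_live_migration (below) resolves with getattr and ORs into a single bitmask.
A minimal sketch of that resolution step (illustrative only; migration_flags
is a hypothetical helper):

    import libvirt

    def migration_flags(flag_string):
        # Resolve each name against the libvirt module and OR the values.
        names = [name.strip() for name in flag_string.split(',')]
        return reduce(lambda x, y: x | y,
                      (getattr(libvirt, name) for name in names))

    # migration_flags("VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
    #                 "VIR_MIGRATE_NON_SHARED_INC") yields the bitmask
    # passed to dom.migrateToURI().
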
@@ -463,18 +467,18 @@ class LibvirtConnection(driver.ComputeDriver):
"""
virt_dom = self._conn.lookupByName(instance['name'])
         # NOTE(itoumsn): Use XML derived from the running instance
- # instead of using to_xml(instance). This is almost the ultimate
- # stupid workaround.
+ # instead of using to_xml(instance, network_info). This is almost
+ # the ultimate stupid workaround.
xml = virt_dom.XMLDesc(0)
# NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
# better because we cannot ensure flushing dirty buffers
# in the guest OS. But, in case of KVM, shutdown() does not work...
self.destroy(instance, network_info, cleanup=False)
self.plug_vifs(instance, network_info)
- self.firewall_driver.setup_basic_filtering(instance)
- self.firewall_driver.prepare_instance_filter(instance)
+ self.firewall_driver.setup_basic_filtering(instance, network_info)
+ self.firewall_driver.prepare_instance_filter(instance, network_info)
self._create_new_domain(xml)
- self.firewall_driver.apply_instance_filter(instance)
+ self.firewall_driver.apply_instance_filter(instance, network_info)
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
@@ -531,7 +535,7 @@ class LibvirtConnection(driver.ComputeDriver):
"""
self.destroy(instance, network_info, cleanup=False)
- xml = self.to_xml(instance, rescue=True)
+ xml = self.to_xml(instance, network_info, rescue=True)
rescue_images = {'image_id': FLAGS.rescue_image_id,
'kernel_id': FLAGS.rescue_kernel_id,
'ramdisk_id': FLAGS.rescue_ramdisk_id}
@@ -574,9 +578,9 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception()
- def spawn(self, context, instance,
- network_info=None, block_device_info=None):
- xml = self.to_xml(instance, False, network_info=network_info,
+ def spawn(self, context, instance, network_info,
+ block_device_info=None):
+ xml = self.to_xml(instance, network_info, False,
block_device_info=block_device_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
@@ -584,7 +588,7 @@ class LibvirtConnection(driver.ComputeDriver):
block_device_info=block_device_info)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
- self.firewall_driver.apply_instance_filter(instance)
+ self.firewall_driver.apply_instance_filter(instance, network_info)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
@@ -727,6 +731,7 @@ class LibvirtConnection(driver.ComputeDriver):
If cow is True, it will make a CoW image instead of a copy.
"""
+
if not os.path.exists(target):
base_dir = os.path.join(FLAGS.instances_path, '_base')
if not os.path.exists(base_dir):
@@ -988,14 +993,10 @@ class LibvirtConnection(driver.ComputeDriver):
else:
raise exception.InvalidDevicePath(path=device_path)
- def _prepare_xml_info(self, instance, rescue=False, network_info=None,
+ def _prepare_xml_info(self, instance, network_info, rescue,
block_device_info=None):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
- # TODO(adiantum) remove network_info creation code
- # when multinics will be completed
- if not network_info:
- network_info = netutils.get_network_info(instance)
nics = []
for (network, mapping) in network_info:
@@ -1082,11 +1083,11 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['disk'] = xml_info['basepath'] + "/disk"
return xml_info
- def to_xml(self, instance, rescue=False, network_info=None,
+ def to_xml(self, instance, network_info, rescue=False,
block_device_info=None):
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- xml_info = self._prepare_xml_info(instance, rescue, network_info,
+ xml_info = self._prepare_xml_info(instance, network_info, rescue,
block_device_info)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'), instance['name'])
@@ -1506,7 +1507,7 @@ class LibvirtConnection(driver.ComputeDriver):
return
- def ensure_filtering_rules_for_instance(self, instance_ref,
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
time=None):
"""Setting up filtering rules and waiting for its completion.
@@ -1536,14 +1537,16 @@ class LibvirtConnection(driver.ComputeDriver):
# If any instances never launch at destination host,
# basic-filtering must be set here.
- self.firewall_driver.setup_basic_filtering(instance_ref)
- # setting up n)ova-instance-instance-xx mainly.
- self.firewall_driver.prepare_instance_filter(instance_ref)
+ self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
+ # setting up nova-instance-instance-xx mainly.
+ self.firewall_driver.prepare_instance_filter(instance_ref,
+ network_info)
# wait for completion
timeout_count = range(FLAGS.live_migration_retry_count)
while timeout_count:
- if self.firewall_driver.instance_filter_exists(instance_ref):
+ if self.firewall_driver.instance_filter_exists(instance_ref,
+ network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
@@ -1552,7 +1555,7 @@ class LibvirtConnection(driver.ComputeDriver):
time.sleep(1)
def live_migration(self, ctxt, instance_ref, dest,
- post_method, recover_method):
+ post_method, recover_method, block_migration=False):
"""Spawning live_migration operation for distributing high-load.
:params ctxt: security context
@@ -1560,20 +1563,22 @@ class LibvirtConnection(driver.ComputeDriver):
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
+ :params block_migration: if true, do block migration.
"""
greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
- post_method, recover_method)
+ post_method, recover_method, block_migration)
- def _live_migration(self, ctxt, instance_ref, dest,
- post_method, recover_method):
+ def _live_migration(self, ctxt, instance_ref, dest, post_method,
+ recover_method, block_migration=False):
"""Do live migration.
:params ctxt: security context
@@ -1592,27 +1597,21 @@ class LibvirtConnection(driver.ComputeDriver):
# Do live migration.
try:
- flaglist = FLAGS.live_migration_flag.split(',')
+ if block_migration:
+ flaglist = FLAGS.block_migration_flag.split(',')
+ else:
+ flaglist = FLAGS.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
- if self.read_only:
- tmpconn = self._connect(self.libvirt_uri, False)
- dom = tmpconn.lookupByName(instance_ref.name)
- dom.migrateToURI(FLAGS.live_migration_uri % dest,
- logical_sum,
- None,
- FLAGS.live_migration_bandwidth)
- tmpconn.close()
- else:
- dom = self._conn.lookupByName(instance_ref.name)
- dom.migrateToURI(FLAGS.live_migration_uri % dest,
- logical_sum,
- None,
- FLAGS.live_migration_bandwidth)
+ dom = self._conn.lookupByName(instance_ref.name)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
except Exception:
- recover_method(ctxt, instance_ref, dest=dest)
+ recover_method(ctxt, instance_ref, dest, block_migration)
raise
# Waiting for completion of live_migration.
@@ -1624,11 +1623,150 @@ class LibvirtConnection(driver.ComputeDriver):
self.get_info(instance_ref.name)['state']
except exception.NotFound:
timer.stop()
- post_method(ctxt, instance_ref, dest)
+ post_method(ctxt, instance_ref, dest, block_migration)
timer.f = wait_for_live_migration
timer.start(interval=0.5, now=True)
+ def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
+ """Preparation block migration.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params disk_info_json:
+ json strings specified in get_instance_disk_info
+
+ """
+ disk_info = utils.loads(disk_info_json)
+
+ # make instance directory
+ instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
+ if os.path.exists(instance_dir):
+ raise exception.DestinationDiskExists(path=instance_dir)
+ os.mkdir(instance_dir)
+
+ for info in disk_info:
+ base = os.path.basename(info['path'])
+ # Get image type and create empty disk image.
+ instance_disk = os.path.join(instance_dir, base)
+ utils.execute('sudo', 'qemu-img', 'create', '-f', info['type'],
+ instance_disk, info['local_gb'])
+
+ # if image has kernel and ramdisk, just download
+ # following normal way.
+ if instance_ref['kernel_id']:
+ user = manager.AuthManager().get_user(instance_ref['user_id'])
+ project = manager.AuthManager().get_project(
+ instance_ref['project_id'])
+ self._fetch_image(nova_context.get_admin_context(),
+ os.path.join(instance_dir, 'kernel'),
+ instance_ref['kernel_id'],
+ user,
+ project)
+ if instance_ref['ramdisk_id']:
+ self._fetch_image(nova_context.get_admin_context(),
+ os.path.join(instance_dir, 'ramdisk'),
+ instance_ref['ramdisk_id'],
+ user,
+ project)
+
+ def post_live_migration_at_destination(self, ctxt,
+ instance_ref,
+ network_info,
+ block_migration):
+ """Post operation of live migration at destination host.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+        :params network_info: instance network information
+        :params block_migration: if true, post operation of block migration.
+ """
+ # Define migrated instance, otherwise, suspend/destroy does not work.
+ dom_list = self._conn.listDefinedDomains()
+ if instance_ref.name not in dom_list:
+ instance_dir = os.path.join(FLAGS.instances_path,
+ instance_ref.name)
+ xml_path = os.path.join(instance_dir, 'libvirt.xml')
+ # In case of block migration, destination does not have
+ # libvirt.xml
+ if not os.path.isfile(xml_path):
+ xml = self.to_xml(instance_ref, network_info=network_info)
+ f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
+ f.write(xml)
+ f.close()
+ # libvirt.xml should be made by to_xml(), but libvirt
+ # does not accept to_xml() result, since uuid is not
+ # included in to_xml() result.
+ dom = self._lookup_by_name(instance_ref.name)
+ self._conn.defineXML(dom.XMLDesc(0))
+
+ def get_instance_disk_info(self, ctxt, instance_ref):
+ """Preparation block migration.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :return:
+ json strings with below format.
+ "[{'path':'disk', 'type':'raw', 'local_gb':'10G'},...]"
+
+ """
+ disk_info = []
+
+ virt_dom = self._lookup_by_name(instance_ref.name)
+ xml = virt_dom.XMLDesc(0)
+ doc = libxml2.parseDoc(xml)
+ disk_nodes = doc.xpathEval('//devices/disk')
+ path_nodes = doc.xpathEval('//devices/disk/source')
+ driver_nodes = doc.xpathEval('//devices/disk/driver')
+
+ for cnt, path_node in enumerate(path_nodes):
+ disk_type = disk_nodes[cnt].get_properties().getContent()
+ path = path_node.get_properties().getContent()
+
+ if disk_type != 'file':
+                LOG.debug(_('skipping %(path)s since it looks like a volume') %
+ locals())
+ continue
+
+            # For XML generated from libvirt.xml, the disk type could be read
+            # with:
+            # -> disk_type = driver_nodes[cnt].get_properties().getContent()
+            # but this XML is generated by kvm and its format is slightly
+            # different.
+ disk_type = \
+ driver_nodes[cnt].get_properties().get_next().getContent()
+ if disk_type == 'raw':
+ size = int(os.path.getsize(path))
+ else:
+ out, err = utils.execute('sudo', 'qemu-img', 'info', path)
+ size = [i.split('(')[1].split()[0] for i in out.split('\n')
+ if i.strip().find('virtual size') >= 0]
+ size = int(size[0])
+
+            # Block migration needs an empty image of the same or larger size
+            # on the destination host. Since qemu-img may create a slightly
+            # smaller image depending on the original image size, round the
+            # size up to a whole unit.
+ for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2),
+ ('K', 1024), ('', 1)]:
+ if size / divisor == 0:
+ continue
+ if size % divisor != 0:
+ size = size / divisor + 1
+ else:
+ size = size / divisor
+ size = str(size) + unit
+ break
+
+ disk_info.append({'type': disk_type, 'path': path,
+ 'local_gb': size})
+
+ return utils.dumps(disk_info)
+
def unfilter_instance(self, instance_ref, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance_ref,
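
get_instance_disk_info rounds each disk size up to a whole unit so the empty
image created on the destination is never smaller than the source. The
rounding loop above can be read as the following helper (a sketch only;
round_size_up is a hypothetical name):

    def round_size_up(size_bytes):
        # Pick the largest unit that fits, rounding up on any remainder.
        for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2),
                              ('K', 1024), ('', 1)]:
            if size_bytes // divisor == 0:
                continue
            if size_bytes % divisor != 0:
                return '%d%s' % (size_bytes // divisor + 1, unit)
            return '%d%s' % (size_bytes // divisor, unit)

    # round_size_up(1073741825) -> '2G': one byte over 1G rounds up, so the
    # qemu-img image created on the destination can always hold the source.
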
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 9ce57b6c9..c2f4f91e8 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -40,17 +40,17 @@ except ImportError:
class FirewallDriver(object):
- def prepare_instance_filter(self, instance, network_info=None):
+ def prepare_instance_filter(self, instance, network_info):
"""Prepare filters for the instance.
At this point, the instance isn't running yet."""
raise NotImplementedError()
- def unfilter_instance(self, instance, network_info=None):
+ def unfilter_instance(self, instance, network_info):
"""Stop filtering instance"""
raise NotImplementedError()
- def apply_instance_filter(self, instance):
+ def apply_instance_filter(self, instance, network_info):
"""Apply instance filter.
Once this method returns, the instance should be firewalled
@@ -60,9 +60,7 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
- def refresh_security_group_rules(self,
- security_group_id,
- network_info=None):
+ def refresh_security_group_rules(self, security_group_id):
"""Refresh security group rules from data store
Gets called when a rule has been added to or removed from
@@ -85,7 +83,7 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
- def setup_basic_filtering(self, instance, network_info=None):
+ def setup_basic_filtering(self, instance, network_info):
"""Create rules to block spoofing and allow dhcp.
This gets called when spawning an instance, before
@@ -94,7 +92,7 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
- def instance_filter_exists(self, instance):
+ def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists"""
raise NotImplementedError()
@@ -150,7 +148,7 @@ class NWFilterFirewall(FirewallDriver):
self.static_filters_configured = False
self.handle_security_groups = False
- def apply_instance_filter(self, instance):
+ def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter"""
pass
@@ -189,13 +187,10 @@ class NWFilterFirewall(FirewallDriver):
</rule>
</filter>'''
- def setup_basic_filtering(self, instance, network_info=None):
+ def setup_basic_filtering(self, instance, network_info):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
logging.info('called setup_basic_filtering in nwfilter')
- if not network_info:
- network_info = netutils.get_network_info(instance)
-
if self.handle_security_groups:
# No point in setting up a filter set that we'll be overriding
# anyway.
@@ -237,7 +232,7 @@ class NWFilterFirewall(FirewallDriver):
self._define_filter(self.nova_base_ipv6_filter)
self._define_filter(self.nova_dhcp_filter)
self._define_filter(self.nova_ra_filter)
- if FLAGS.allow_project_net_traffic:
+ if FLAGS.allow_same_net_traffic:
self._define_filter(self.nova_project_filter)
if FLAGS.use_ipv6:
self._define_filter(self.nova_project_filter_v6)
@@ -300,10 +295,8 @@ class NWFilterFirewall(FirewallDriver):
# execute in a native thread and block current greenthread until done
tpool.execute(self._conn.nwfilterDefineXML, xml)
- def unfilter_instance(self, instance, network_info=None):
+ def unfilter_instance(self, instance, network_info):
"""Clear out the nwfilter rules."""
- if not network_info:
- network_info = netutils.get_network_info(instance)
instance_name = instance.name
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
@@ -326,16 +319,13 @@ class NWFilterFirewall(FirewallDriver):
LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) '
'for %(instance_name)s is not found.') % locals())
- def prepare_instance_filter(self, instance, network_info=None):
+ def prepare_instance_filter(self, instance, network_info):
"""Creates an NWFilter for the given instance.
In the process, it makes sure the filters for the provider blocks,
security groups, and base filter are all in place.
"""
- if not network_info:
- network_info = netutils.get_network_info(instance)
-
self.refresh_provider_fw_rules()
ctxt = context.get_admin_context()
@@ -388,7 +378,7 @@ class NWFilterFirewall(FirewallDriver):
instance_filter_children = [base_filter, 'nova-provider-rules',
instance_secgroup_filter_name]
- if FLAGS.allow_project_net_traffic:
+ if FLAGS.allow_same_net_traffic:
instance_filter_children.append('nova-project')
if FLAGS.use_ipv6:
instance_filter_children.append('nova-project-v6')
@@ -401,9 +391,7 @@ class NWFilterFirewall(FirewallDriver):
self._define_filter(self._filter_container(filter_name,
filter_children))
- def refresh_security_group_rules(self,
- security_group_id,
- network_info=None):
+ def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
@@ -500,9 +488,8 @@ class NWFilterFirewall(FirewallDriver):
return 'nova-instance-%s' % (instance['name'])
return 'nova-instance-%s-%s' % (instance['name'], nic_id)
- def instance_filter_exists(self, instance):
+ def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists"""
- network_info = netutils.get_network_info(instance)
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
@@ -521,6 +508,7 @@ class IptablesFirewallDriver(FirewallDriver):
from nova.network import linux_net
self.iptables = linux_net.iptables_manager
self.instances = {}
+ self.network_infos = {}
self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
self.basicly_filtered = False
@@ -529,22 +517,22 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables.ipv6['filter'].add_chain('sg-fallback')
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
- def setup_basic_filtering(self, instance, network_info=None):
+ def setup_basic_filtering(self, instance, network_info):
"""Set up provider rules and basic NWFilter."""
- if not network_info:
- network_info = netutils.get_network_info(instance)
self.nwfilter.setup_basic_filtering(instance, network_info)
if not self.basicly_filtered:
LOG.debug(_('iptables firewall: Setup Basic Filtering'))
self.refresh_provider_fw_rules()
self.basicly_filtered = True
- def apply_instance_filter(self, instance):
+ def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter"""
pass
- def unfilter_instance(self, instance, network_info=None):
+ def unfilter_instance(self, instance, network_info):
if self.instances.pop(instance['id'], None):
+ # NOTE(vish): use the passed info instead of the stored info
+ self.network_infos.pop(instance['id'])
self.remove_filters_for_instance(instance)
self.iptables.apply()
self.nwfilter.unfilter_instance(instance, network_info)
@@ -552,11 +540,10 @@ class IptablesFirewallDriver(FirewallDriver):
LOG.info(_('Attempted to unfilter instance %s which is not '
'filtered'), instance['id'])
- def prepare_instance_filter(self, instance, network_info=None):
- if not network_info:
- network_info = netutils.get_network_info(instance)
+ def prepare_instance_filter(self, instance, network_info):
self.instances[instance['id']] = instance
- self.add_filters_for_instance(instance, network_info)
+ self.network_infos[instance['id']] = network_info
+ self.add_filters_for_instance(instance)
self.iptables.apply()
def _create_filter(self, ips, chain_name):
@@ -583,7 +570,8 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
- def add_filters_for_instance(self, instance, network_info=None):
+ def add_filters_for_instance(self, instance):
+ network_info = self.network_infos[instance['id']]
chain_name = self._instance_chain_name(instance)
if FLAGS.use_ipv6:
self.iptables.ipv6['filter'].add_chain(chain_name)
@@ -601,9 +589,7 @@ class IptablesFirewallDriver(FirewallDriver):
if FLAGS.use_ipv6:
self.iptables.ipv6['filter'].remove_chain(chain_name)
- def instance_rules(self, instance, network_info=None):
- if not network_info:
- network_info = netutils.get_network_info(instance)
+ def instance_rules(self, instance, network_info):
ctxt = context.get_admin_context()
ipv4_rules = []
@@ -621,14 +607,14 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules += ['-j $provider']
ipv6_rules += ['-j $provider']
- dhcp_servers = [info['gateway'] for (_n, info) in network_info]
+ dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info]
for dhcp_server in dhcp_servers:
ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
'-j ACCEPT' % (dhcp_server,))
#Allow project network traffic
- if FLAGS.allow_project_net_traffic:
+ if FLAGS.allow_same_net_traffic:
cidrs = [network['cidr'] for (network, _m) in network_info]
for cidr in cidrs:
ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
@@ -645,7 +631,7 @@ class IptablesFirewallDriver(FirewallDriver):
'-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
#Allow project network traffic
- if FLAGS.allow_project_net_traffic:
+ if FLAGS.allow_same_net_traffic:
cidrv6s = [network['cidr_v6'] for (network, _m) in
network_info]
@@ -664,11 +650,10 @@ class IptablesFirewallDriver(FirewallDriver):
LOG.debug(_('Adding security group rule: %r'), rule)
if not rule.cidr:
- # Eventually, a mechanism to grant access for security
- # groups will turn up here. It'll use ipsets.
- continue
+ version = 4
+ else:
+ version = netutils.get_ip_version(rule.cidr)
- version = netutils.get_ip_version(rule.cidr)
if version == 4:
fw_rules = ipv4_rules
else:
@@ -678,16 +663,16 @@ class IptablesFirewallDriver(FirewallDriver):
if version == 6 and rule.protocol == 'icmp':
protocol = 'icmpv6'
- args = ['-p', protocol, '-s', rule.cidr]
+ args = ['-j ACCEPT', '-p', protocol]
- if rule.protocol in ['udp', 'tcp']:
+ if protocol in ['udp', 'tcp']:
if rule.from_port == rule.to_port:
args += ['--dport', '%s' % (rule.from_port,)]
else:
args += ['-m', 'multiport',
'--dports', '%s:%s' % (rule.from_port,
rule.to_port)]
- elif rule.protocol == 'icmp':
+ elif protocol == 'icmp':
icmp_type = rule.from_port
icmp_code = rule.to_port
@@ -706,34 +691,44 @@ class IptablesFirewallDriver(FirewallDriver):
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
- args += ['-j ACCEPT']
- fw_rules += [' '.join(args)]
-
+ if rule.cidr:
+ LOG.info('Using cidr %r', rule.cidr)
+ args += ['-s', rule.cidr]
+ fw_rules += [' '.join(args)]
+ else:
+ if rule['grantee_group']:
+ for instance in rule['grantee_group']['instances']:
+ LOG.info('instance: %r', instance)
+ ips = db.instance_get_fixed_addresses(ctxt,
+ instance['id'])
+ LOG.info('ips: %r', ips)
+ for ip in ips:
+ subrule = args + ['-s %s' % ip]
+ fw_rules += [' '.join(subrule)]
+
+ LOG.info('Using fw_rules: %r', fw_rules)
ipv4_rules += ['-j $sg-fallback']
ipv6_rules += ['-j $sg-fallback']
return ipv4_rules, ipv6_rules
- def instance_filter_exists(self, instance):
+ def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists"""
- return self.nwfilter.instance_filter_exists(instance)
+ return self.nwfilter.instance_filter_exists(instance, network_info)
def refresh_security_group_members(self, security_group):
- pass
+ self.do_refresh_security_group_rules(security_group)
+ self.iptables.apply()
- def refresh_security_group_rules(self, security_group, network_info=None):
- self.do_refresh_security_group_rules(security_group, network_info)
+ def refresh_security_group_rules(self, security_group):
+ self.do_refresh_security_group_rules(security_group)
self.iptables.apply()
@utils.synchronized('iptables', external=True)
- def do_refresh_security_group_rules(self,
- security_group,
- network_info=None):
+ def do_refresh_security_group_rules(self, security_group):
for instance in self.instances.values():
self.remove_filters_for_instance(instance)
- if not network_info:
- network_info = netutils.get_network_info(instance)
- self.add_filters_for_instance(instance, network_info)
+ self.add_filters_for_instance(instance)
def refresh_provider_fw_rules(self):
"""See class:FirewallDriver: docs."""
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py
index a8e88fc07..6f303072d 100644
--- a/nova/virt/libvirt/netutils.py
+++ b/nova/virt/libvirt/netutils.py
@@ -23,12 +23,7 @@
import netaddr
-from nova import context
-from nova import db
-from nova import exception
from nova import flags
-from nova import ipv6
-from nova import utils
FLAGS = flags.FLAGS
@@ -47,65 +42,3 @@ def get_net_and_prefixlen(cidr):
def get_ip_version(cidr):
net = netaddr.IPNetwork(cidr)
return int(net.version)
-
-
-def get_network_info(instance):
- # TODO(tr3buchet): this function needs to go away! network info
- # MUST be passed down from compute
- # TODO(adiantum) If we will keep this function
- # we should cache network_info
- admin_context = context.get_admin_context()
-
- try:
- fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id'])
- except exception.FixedIpNotFoundForInstance:
- fixed_ips = []
-
- vifs = db.virtual_interface_get_by_instance(admin_context, instance['id'])
- flavor = db.instance_type_get(admin_context,
- instance['instance_type_id'])
- network_info = []
-
- for vif in vifs:
- network = vif['network']
-
- # determine which of the instance's IPs belong to this network
- network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if
- fixed_ip['network_id'] == network['id']]
-
- def ip_dict(ip):
- return {
- 'ip': ip,
- 'netmask': network['netmask'],
- 'enabled': '1'}
-
- def ip6_dict():
- prefix = network['cidr_v6']
- mac = vif['address']
- project_id = instance['project_id']
- return {
- 'ip': ipv6.to_global(prefix, mac, project_id),
- 'netmask': network['netmask_v6'],
- 'enabled': '1'}
-
- mapping = {
- 'label': network['label'],
- 'gateway': network['gateway'],
- 'broadcast': network['broadcast'],
- 'dhcp_server': network['gateway'],
- 'mac': vif['address'],
- 'rxtx_cap': flavor['rxtx_cap'],
- 'dns': [],
- 'ips': [ip_dict(ip) for ip in network_ips]}
-
- if network['dns1']:
- mapping['dns'].append(network['dns1'])
- if network['dns2']:
- mapping['dns'].append(network['dns2'])
-
- if FLAGS.use_ipv6:
- mapping['ip6s'] = [ip6_dict()]
- mapping['gateway6'] = network['gateway_v6']
-
- network_info.append((network, mapping))
- return network_info
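
With get_network_info gone, every libvirt entry point now receives
network_info from compute. Based on the deleted helper above, each element is
a (network, mapping) pair roughly shaped like the following (illustrative
values only):

    network_info = [
        ({'bridge': 'br100',              # network ref (db row), trimmed
          'cidr': '192.168.0.0/24',
          'cidr_v6': 'fd00::/64'},
         {'label': 'private',
          'gateway': '192.168.0.1',
          'broadcast': '192.168.0.255',
          'dhcp_server': '192.168.0.1',
          'mac': '02:16:3e:00:00:01',
          'rxtx_cap': 0,
          'dns': ['8.8.4.4'],
          'ips': [{'ip': '192.168.0.2',
                   'netmask': '255.255.255.0',
                   'enabled': '1'}],
          # present only when FLAGS.use_ipv6 is set:
          'ip6s': [{'ip': 'fd00::216:3eff:fe00:1',
                    'netmask': '64',
                    'enabled': '1'}],
          'gateway6': 'fd00::1'})]
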
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index e243d4fa0..5a91a4e28 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -44,7 +44,7 @@ class LibvirtBridgeDriver(VIFDriver):
gateway6 = mapping.get('gateway6')
mac_id = mapping['mac'].replace(':', '')
- if FLAGS.allow_project_net_traffic:
+ if FLAGS.allow_same_net_traffic:
template = "<parameter name=\"%s\"value=\"%s\" />\n"
net, mask = netutils.get_net_and_mask(network['cidr'])
values = [("PROJNET", net), ("PROJMASK", mask)]
@@ -80,12 +80,14 @@ class LibvirtBridgeDriver(VIFDriver):
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
{'vlan': network['vlan'],
'bridge': network['bridge']})
- linux_net.ensure_vlan_bridge(network['vlan'],
+ linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
+ network['vlan'],
network['bridge'],
network['bridge_interface'])
else:
LOG.debug(_("Ensuring bridge %s"), network['bridge'])
- linux_net.ensure_bridge(network['bridge'],
+ linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
+ network['bridge'],
network['bridge_interface'])
return self._get_configurations(network, mapping)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index d5ac39473..7c91aa9b9 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -140,6 +140,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
'location': '',
'xenstore_data': '',
'sm_config': {},
+ 'physical_utilisation': '123',
'VBDs': {}})
@@ -194,6 +195,7 @@ def create_local_pifs():
Do this one per host."""
for host_ref in _db_content['host'].keys():
_create_local_pif(host_ref)
+ _create_local_sr_iso(host_ref)
def create_local_srs():
@@ -222,6 +224,25 @@ def _create_local_sr(host_ref):
return sr_ref
+def _create_local_sr_iso(host_ref):
+ sr_ref = _create_object(
+ 'SR',
+ {'name_label': 'Local storage ISO',
+ 'type': 'lvm',
+ 'content_type': 'iso',
+ 'shared': False,
+ 'physical_size': str(1 << 30),
+ 'physical_utilisation': str(0),
+ 'virtual_allocation': str(0),
+ 'other_config': {
+ 'i18n-original-value-name_label': 'Local storage ISO',
+ 'i18n-key': 'local-storage-iso'},
+ 'VDIs': []})
+ pbd_ref = create_pbd('', host_ref, sr_ref, True)
+ _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+ return sr_ref
+
+
def _create_local_pif(host_ref):
pif_ref = _create_object('PIF',
{'name-label': 'Fake PIF',
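
The fake 'Local storage ISO' SR above carries exactly the markers that the new
find_iso_sr (in the vm_utils.py changes below) looks for. A minimal sketch of
that matching predicate, assuming the record layout shown above
(is_local_iso_sr is a hypothetical name):

    def is_local_iso_sr(sr_rec):
        # An SR qualifies when it holds ISO content and is tagged as the
        # host-local ISO library.
        return (sr_rec.get('content_type') == 'iso' and
                sr_rec.get('other_config', {}).get('i18n-key') ==
                'local-storage-iso')
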
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6c44d53d4..4a1f07bb1 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -31,6 +31,7 @@ import uuid
from xml.dom import minidom
import glance.client
+from nova import db
from nova import exception
from nova import flags
import nova.image
@@ -77,6 +78,7 @@ class ImageType:
3 - raw disk image (local SR, NOT partitioned by plugin)
4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
linux, HVM assumed for Windows)
+ 5 - ISO disk image (local SR, NOT partitioned by plugin)
"""
KERNEL = 0
@@ -84,14 +86,17 @@ class ImageType:
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
- _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD)
+ DISK_ISO = 5
+ _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "os"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
- _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR)
+ DISK_ISO_STR = "iso"
+ _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
+ DISK_ISO_STR)
@classmethod
def to_string(cls, image_type):
@@ -223,6 +228,30 @@ class VMHelper(HelperBase):
return vbd_ref
@classmethod
+ def create_cd_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
+ """Create a VBD record. Returns a Deferred that gives the new
+ VBD reference specific to CDRom devices."""
+ vbd_rec = {}
+ vbd_rec['VM'] = vm_ref
+ vbd_rec['VDI'] = vdi_ref
+ vbd_rec['userdevice'] = str(userdevice)
+ vbd_rec['bootable'] = bootable
+ vbd_rec['mode'] = 'RO'
+ vbd_rec['type'] = 'CD'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ LOG.debug(_('Creating a CDROM-specific VBD for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s ... ') % locals())
+ vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
+ LOG.debug(_('Created a CDROM-specific VBD %(vbd_ref)s '
+ ' for VM %(vm_ref)s, VDI %(vdi_ref)s.') % locals())
+ return vbd_ref
+
+ @classmethod
def find_vbd_by_number(cls, session, vm_ref, number):
"""Get the VBD reference from the device number"""
vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
@@ -368,7 +397,24 @@ class VMHelper(HelperBase):
session.wait_for_task(task, instance.id)
@classmethod
- def fetch_image(cls, context, session, instance_id, image, user_id,
+ def fetch_blank_disk(cls, session, instance_type_id):
+        # Size the blank hard drive to suit the machine type:
+ one_gig = 1024 * 1024 * 1024
+ req_type = instance_types.get_instance_type(instance_type_id)
+ req_size = req_type['local_gb']
+
+ LOG.debug("Creating blank HD of size %(req_size)d gigs"
+ % locals())
+ vdi_size = one_gig * req_size
+
+ LOG.debug("ISO vm create: Looking for the SR")
+ sr_ref = safe_find_sr(session)
+
+ vdi_ref = cls.create_vdi(session, sr_ref, 'blank HD', vdi_size, False)
+ return vdi_ref
+
+ @classmethod
+ def fetch_image(cls, context, session, instance, image, user_id,
project_id, image_type):
"""Fetch image from glance based on image type.
@@ -377,18 +423,19 @@ class VMHelper(HelperBase):
"""
if image_type == ImageType.DISK_VHD:
return cls._fetch_image_glance_vhd(context,
- session, instance_id, image, image_type)
+ session, instance, image, image_type)
else:
return cls._fetch_image_glance_disk(context,
- session, instance_id, image, image_type)
+ session, instance, image, image_type)
@classmethod
- def _fetch_image_glance_vhd(cls, context, session, instance_id, image,
+ def _fetch_image_glance_vhd(cls, context, session, instance, image,
image_type):
"""Tell glance to download an image and put the VHDs into the SR
Returns: A list of dictionaries that describe VDIs
"""
+ instance_id = instance.id
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
sr_ref = safe_find_sr(session)
@@ -422,17 +469,58 @@ class VMHelper(HelperBase):
cls.scan_sr(session, instance_id, sr_ref)
- # Pull out the UUID of the first VDI
- vdi_uuid = vdis[0]['vdi_uuid']
+ # Pull out the UUID of the first VDI (which is the os VDI)
+ os_vdi_uuid = vdis[0]['vdi_uuid']
+
# Set the name-label to ease debugging
- vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(os_vdi_uuid)
primary_name_label = get_name_label_for_image(image)
session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label)
+ cls._check_vdi_size(context, session, instance, os_vdi_uuid)
return vdis
@classmethod
- def _fetch_image_glance_disk(cls, context, session, instance_id, image,
+ def _get_vdi_chain_size(cls, context, session, vdi_uuid):
+ """Compute the total size of a VDI chain, starting with the specified
+ VDI UUID.
+
+        This walks the VDI chain to the root, adding the size of each VDI
+        to the total.
+ """
+ size_bytes = 0
+ for vdi_rec in walk_vdi_chain(session, vdi_uuid):
+ cur_vdi_uuid = vdi_rec['uuid']
+ vdi_size_bytes = int(vdi_rec['physical_utilisation'])
+            LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
+                        '%(vdi_size_bytes)d') % locals())
+ size_bytes += vdi_size_bytes
+ return size_bytes
+
+ @classmethod
+ def _check_vdi_size(cls, context, session, instance, vdi_uuid):
+ size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid)
+
+ # FIXME(jk0): this was copied directly from compute.manager.py, let's
+ # refactor this to a common area
+ instance_type_id = instance['instance_type_id']
+ instance_type = db.instance_type_get(context,
+ instance_type_id)
+ allowed_size_gb = instance_type['local_gb']
+ allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+ LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes="
+ "%(allowed_size_bytes)d") % locals())
+
+ if size_bytes > allowed_size_bytes:
+ LOG.info(_("Image size %(size_bytes)d exceeded"
+ " instance_type allowed size "
+ "%(allowed_size_bytes)d")
+ % locals())
+ raise exception.ImageTooLarge()
+
+ @classmethod
+ def _fetch_image_glance_disk(cls, context, session, instance, image,
image_type):
"""Fetch the image from Glance
@@ -444,12 +532,18 @@ class VMHelper(HelperBase):
Returns: A single filename if image_type is KERNEL_RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
+ instance_id = instance.id
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
LOG.debug(_("Fetching image %(image)s") % locals())
LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type))
- sr_ref = safe_find_sr(session)
+
+ if image_type == ImageType.DISK_ISO:
+ sr_ref = safe_find_iso_sr(session)
+ LOG.debug(_("ISO: Found sr possibly containing the ISO image"))
+ else:
+ sr_ref = safe_find_sr(session)
glance_client, image_id = nova.image.get_glance_client(image)
glance_client.set_auth_token(getattr(context, 'auth_token', None))
@@ -527,7 +621,8 @@ class VMHelper(HelperBase):
ImageType.RAMDISK: 'RAMDISK',
ImageType.DISK: 'DISK',
ImageType.DISK_RAW: 'DISK_RAW',
- ImageType.DISK_VHD: 'DISK_VHD'}
+ ImageType.DISK_VHD: 'DISK_VHD',
+ ImageType.DISK_ISO: 'DISK_ISO'}
disk_format = pretty_format[image_type]
image_ref = instance.image_ref
instance_id = instance.id
@@ -540,7 +635,8 @@ class VMHelper(HelperBase):
'aki': ImageType.KERNEL,
'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
- 'vhd': ImageType.DISK_VHD}
+ 'vhd': ImageType.DISK_VHD,
+ 'iso': ImageType.DISK_ISO}
image_ref = instance.image_ref
glance_client, image_id = nova.image.get_glance_client(image_ref)
meta = glance_client.get_image_meta(image_id)
@@ -574,6 +670,8 @@ class VMHelper(HelperBase):
available
3. Glance (DISK): pv is assumed
+
+ 4. Glance (DISK_ISO): no pv is assumed
"""
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
@@ -589,6 +687,9 @@ class VMHelper(HelperBase):
elif disk_image_type == ImageType.DISK:
# 3. Disk
is_pv = True
+ elif disk_image_type == ImageType.DISK_ISO:
+ # 4. ISO
+ is_pv = False
else:
raise exception.Error(_("Unknown image format %(disk_image_type)s")
% locals())
@@ -750,6 +851,21 @@ def get_vhd_parent_uuid(session, vdi_ref):
return None
+def walk_vdi_chain(session, vdi_uuid):
+ """Yield vdi_recs for each element in a VDI chain"""
+ # TODO(jk0): perhaps make get_vhd_parent use this
+ while True:
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ yield vdi_rec
+
+ parent_uuid = vdi_rec['sm_config'].get('vhd-parent')
+ if parent_uuid:
+ vdi_uuid = parent_uuid
+ else:
+ break
+
+
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
@@ -832,6 +948,48 @@ def find_sr(session):
return None
+def safe_find_iso_sr(session):
+ """Same as find_iso_sr except raises a NotFound exception if SR cannot be
+ determined
+ """
+ sr_ref = find_iso_sr(session)
+ if sr_ref is None:
+ raise exception.NotFound(_('Cannot find SR of content-type ISO'))
+ return sr_ref
+
+
+def find_iso_sr(session):
+ """Return the storage repository to hold ISO images"""
+ host = session.get_xenapi_host()
+ sr_refs = session.get_xenapi().SR.get_all()
+ for sr_ref in sr_refs:
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+
+ LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
+        if sr_rec['content_type'] != 'iso':
+ LOG.debug(_("ISO: not iso content"))
+ continue
+        if 'i18n-key' not in sr_rec['other_config']:
+ LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
+ continue
+        if sr_rec['other_config']['i18n-key'] != 'local-storage-iso':
+ LOG.debug(_("ISO: iso content_type, i18n-key value not "
+ "'local-storage-iso'"))
+ continue
+
+ LOG.debug(_("ISO: SR MATCHing our criteria"))
+ for pbd_ref in sr_rec['PBDs']:
+ LOG.debug(_("ISO: ISO, looking to see if it is host local"))
+ pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
+ pbd_rec_host = pbd_rec['host']
+ LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, have %(host)s") %
+ locals())
+ if pbd_rec_host == host:
+ LOG.debug(_("ISO: SR with local PBD"))
+ return sr_ref
+ return None
+
+
def remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b9cd59946..eb0a846b5 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -137,7 +137,7 @@ class VMOps(object):
def _create_disks(self, context, instance):
disk_image_type = VMHelper.determine_disk_image_type(instance)
vdis = VMHelper.fetch_image(context, self._session,
- instance.id, instance.image_ref,
+ instance, instance.image_ref,
instance.user_id, instance.project_id,
disk_image_type)
return vdis
@@ -182,11 +182,11 @@ class VMOps(object):
try:
if instance.kernel_id:
kernel = VMHelper.fetch_image(context, self._session,
- instance.id, instance.kernel_id, instance.user_id,
+ instance, instance.kernel_id, instance.user_id,
instance.project_id, ImageType.KERNEL)[0]
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(context, self._session,
- instance.id, instance.kernel_id, instance.user_id,
+ instance, instance.ramdisk_id, instance.user_id,
instance.project_id, ImageType.RAMDISK)[0]
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
@@ -235,12 +235,51 @@ class VMOps(object):
raise vm_create_error
- VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
- vdi_ref=first_vdi_ref, userdevice=0, bootable=True)
+ # Add disks to VM
+ self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref,
+ vdis)
+
+        # Alter the image before VM start, e.g. for network injection
+ if FLAGS.flat_injected:
+ VMHelper.preconfigure_instance(self._session, instance,
+ first_vdi_ref, network_info)
+
+ self.create_vifs(vm_ref, instance, network_info)
+ self.inject_network_info(instance, network_info, vm_ref)
+ return vm_ref
+
+ def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref,
+ vdis):
+ # device 0 reserved for RW disk
+ userdevice = 0
+
+ # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
+ if disk_image_type == ImageType.DISK_ISO:
+ LOG.debug("detected ISO image type, going to create blank VM for "
+ "install")
+
+ cd_vdi_ref = first_vdi_ref
+ first_vdi_ref = VMHelper.fetch_blank_disk(session=self._session,
+ instance_type_id=instance.instance_type_id)
+
+ VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=False)
+
+ # device 1 reserved for rescue disk and we've used '0'
+ userdevice = 2
+ VMHelper.create_cd_vbd(session=self._session, vm_ref=vm_ref,
+ vdi_ref=cd_vdi_ref, userdevice=userdevice, bootable=True)
+
+ # set user device to next free value
+ userdevice += 1
+ else:
+ VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ vdi_ref=first_vdi_ref, userdevice=userdevice, bootable=True)
+ # set user device to next free value
+ # userdevice 1 is reserved for rescue and we've used '0'
+ userdevice = 2
# Attach any other disks
- # userdevice 1 is reserved for rescue
- userdevice = 2
for vdi in vdis[1:]:
# vdi['vdi_type'] is either 'os' or 'swap', but we don't
# really care what it is right here.
@@ -251,15 +290,6 @@ class VMOps(object):
bootable=False)
userdevice += 1
- # Alter the image before VM start for, e.g. network injection
- if FLAGS.flat_injected:
- VMHelper.preconfigure_instance(self._session, instance,
- first_vdi_ref, network_info)
-
- self.create_vifs(vm_ref, instance, network_info)
- self.inject_network_info(instance, network_info, vm_ref)
- return vm_ref
-
def _spawn(self, instance, vm_ref):
"""Spawn a new instance."""
LOG.debug(_('Starting VM %s...'), vm_ref)
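
For ISO boots, _attach_disks above ends up with a fixed device layout: the
install target occupies device 0, device 1 stays reserved for the rescue disk,
and the bootable CD sits at device 2. Summarised (an annotation, not code from
the patch):

    # Device numbering produced by _attach_disks for DISK_ISO (sketch):
    #   userdevice 0  -> blank RW disk sized from the instance type
    #   userdevice 1  -> reserved for the rescue disk
    #   userdevice 2  -> CD-ROM VBD holding the ISO (bootable)
    #   userdevice 3+ -> any remaining VDIs (swap, etc.)
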
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 76b6c57fc..0d23e7689 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -309,12 +309,12 @@ class XenAPIConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
- def ensure_filtering_rules_for_instance(self, instance_ref):
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""This method is supported only libvirt."""
return
def live_migration(self, context, instance_ref, dest,
- post_method, recover_method):
+ post_method, recover_method, block_migration=False):
"""This method is supported only by libvirt."""
return
diff --git a/smoketests/openwrt-x86-ext2.image b/smoketests/openwrt-x86-ext2.image
deleted file mode 100644
index cd2dfa426..000000000
--- a/smoketests/openwrt-x86-ext2.image
+++ /dev/null
Binary files differ
diff --git a/smoketests/openwrt-x86-vmlinuz b/smoketests/openwrt-x86-vmlinuz
deleted file mode 100644
index 59cc9bb1f..000000000
--- a/smoketests/openwrt-x86-vmlinuz
+++ /dev/null
Binary files differ
diff --git a/smoketests/random.image b/smoketests/random.image
new file mode 100644
index 000000000..f2c0c30bb
--- /dev/null
+++ b/smoketests/random.image
Binary files differ
diff --git a/smoketests/random.kernel b/smoketests/random.kernel
new file mode 100644
index 000000000..01a6284dd
--- /dev/null
+++ b/smoketests/random.kernel
Binary files differ
diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py
index 8c8fa35b8..ef73e6f4c 100644
--- a/smoketests/test_netadmin.py
+++ b/smoketests/test_netadmin.py
@@ -107,14 +107,18 @@ class AddressTests(base.UserSmokeTestCase):
class SecurityGroupTests(base.UserSmokeTestCase):
- def __public_instance_is_accessible(self):
- id_url = "latest/meta-data/instance-id"
+ def __get_metadata_item(self, category):
+ id_url = "latest/meta-data/%s" % category
options = "-f -s --max-time 1"
command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url)
status, output = commands.getstatusoutput(command)
- instance_id = output.strip()
+ value = output.strip()
if status > 0:
return False
+ return value
+
+ def __public_instance_is_accessible(self):
+ instance_id = self.__get_metadata_item('instance-id')
if not instance_id:
return False
if instance_id != self.data['instance'].id:
@@ -166,7 +170,14 @@ class SecurityGroupTests(base.UserSmokeTestCase):
finally:
result = self.conn.disassociate_address(self.data['public_ip'])
- def test_005_can_revoke_security_group_ingress(self):
+    def test_005_validate_metadata(self):
+        instance = self.data['instance']
+        self.assertEqual(instance.instance_type,
+                         self.__get_metadata_item("instance-type"))
+ #FIXME(dprince): validate more metadata here
+
+ def test_006_can_revoke_security_group_ingress(self):
self.assertTrue(self.conn.revoke_security_group(TEST_GROUP,
ip_protocol='tcp',
from_port=80,
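
The curl-based helper above shells out once per request; an equivalent written
against the standard library would look roughly like this (a sketch using
urllib2, assuming Python 2 as in the rest of the tree; it mirrors the
helper's False-on-error contract):

    import urllib2

    def get_metadata_item(public_ip, category, timeout=1):
        url = 'http://%s/latest/meta-data/%s' % (public_ip, category)
        try:
            return urllib2.urlopen(url, timeout=timeout).read().strip()
        except (urllib2.URLError, IOError):
            # Match the original helper: unreachable metadata reads as False.
            return False
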
diff --git a/smoketests/test_sysadmin.py b/smoketests/test_sysadmin.py
index 454f6f1d5..29cda1a9b 100644
--- a/smoketests/test_sysadmin.py
+++ b/smoketests/test_sysadmin.py
@@ -35,9 +35,9 @@ from smoketests import flags
from smoketests import base
FLAGS = flags.FLAGS
-flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
+flags.DEFINE_string('bundle_kernel', 'random.kernel',
'Local kernel file to use for bundling tests')
-flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
+flags.DEFINE_string('bundle_image', 'random.image',
'Local image file to use for bundling tests')
TEST_PREFIX = 'test%s' % int(random.random() * 1000000)