summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap3
-rw-r--r--Authors7
-rw-r--r--MANIFEST.in1
-rwxr-xr-xbin/nova-dhcpbridge7
-rwxr-xr-xbin/nova-manage82
-rwxr-xr-xbin/stack2
-rw-r--r--doc/source/devref/index.rst1
-rw-r--r--doc/source/devref/zone.rst6
-rw-r--r--doc/source/man/novamanage.rst4
-rw-r--r--doc/source/runnova/managing.users.rst6
-rw-r--r--nova/CA/openssl.cnf.tmpl2
-rw-r--r--nova/api/ec2/__init__.py4
-rw-r--r--nova/api/ec2/cloud.py31
-rw-r--r--nova/api/ec2/ec2utils.py5
-rw-r--r--nova/api/openstack/__init__.py3
-rw-r--r--nova/api/openstack/auth.py17
-rw-r--r--nova/api/openstack/extensions.py84
-rw-r--r--nova/api/openstack/flavors.py3
-rw-r--r--nova/api/openstack/limits.py4
-rw-r--r--nova/api/openstack/servers.py29
-rw-r--r--nova/api/openstack/views/limits.py43
-rw-r--r--nova/api/openstack/zones.py43
-rw-r--r--nova/compute/api.py33
-rw-r--r--nova/compute/manager.py50
-rw-r--r--nova/crypto.py45
-rw-r--r--nova/db/api.py32
-rw-r--r--nova/db/sqlalchemy/api.py61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/001_austin.py8
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py9
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py5
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py8
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py203
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py68
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py60
-rw-r--r--nova/db/sqlalchemy/models.py22
-rw-r--r--nova/exception.py8
-rw-r--r--nova/flags.py10
-rw-r--r--nova/ipv6/__init__.py (renamed from nova/tests/real_flags.py)13
-rw-r--r--nova/ipv6/account_identifier.py45
-rw-r--r--nova/ipv6/api.py41
-rw-r--r--nova/ipv6/rfc2462.py42
-rw-r--r--nova/network/api.py45
-rw-r--r--nova/network/linux_net.py228
-rw-r--r--nova/network/manager.py67
-rw-r--r--nova/network/vmwareapi_net.py14
-rw-r--r--nova/network/xenapi_net.py18
-rw-r--r--nova/notifier/__init__.py14
-rw-r--r--nova/notifier/api.py83
-rw-r--r--nova/notifier/log_notifier.py34
-rw-r--r--nova/notifier/no_op_notifier.py19
-rw-r--r--nova/notifier/rabbit_notifier.py36
-rw-r--r--nova/quota.py134
-rw-r--r--nova/scheduler/api.py45
-rw-r--r--nova/scheduler/host_filter.py22
-rw-r--r--nova/scheduler/zone_aware_scheduler.py119
-rw-r--r--nova/service.py10
-rw-r--r--nova/tests/api/openstack/extensions/foxinsocks.py26
-rw-r--r--nova/tests/api/openstack/fakes.py3
-rw-r--r--nova/tests/api/openstack/test_extensions.py45
-rw-r--r--nova/tests/api/openstack/test_limits.py89
-rw-r--r--nova/tests/api/openstack/test_servers.py138
-rw-r--r--nova/tests/api/openstack/test_zones.py42
-rw-r--r--nova/tests/db/fakes.py1
-rw-r--r--nova/tests/fake_flags.py28
-rw-r--r--nova/tests/integrated/integrated_helpers.py13
-rw-r--r--nova/tests/network/base.py9
-rw-r--r--nova/tests/public_key/dummy.fingerprint1
-rw-r--r--nova/tests/public_key/dummy.pub1
-rw-r--r--nova/tests/test_api.py42
-rw-r--r--nova/tests/test_cloud.py85
-rw-r--r--nova/tests/test_compute.py31
-rw-r--r--nova/tests/test_crypto.py48
-rw-r--r--nova/tests/test_flags.py14
-rw-r--r--nova/tests/test_host_filter.py26
-rw-r--r--nova/tests/test_ipv6.py60
-rw-r--r--nova/tests/test_libvirt.py (renamed from nova/tests/test_virt.py)73
-rw-r--r--nova/tests/test_notifier.py117
-rw-r--r--nova/tests/test_quota.py153
-rw-r--r--nova/tests/test_scheduler.py61
-rw-r--r--nova/tests/test_utils.py2
-rw-r--r--nova/tests/test_xenapi.py73
-rw-r--r--nova/tests/test_zone_aware_scheduler.py119
-rw-r--r--nova/tests/xenapi/stubs.py39
-rw-r--r--nova/utils.py31
-rw-r--r--nova/virt/connection.py2
-rw-r--r--nova/virt/disk.py50
-rw-r--r--nova/virt/hyperv.py8
-rw-r--r--nova/virt/images.py69
-rw-r--r--nova/virt/libvirt/__init__.py0
-rw-r--r--nova/virt/libvirt/connection.py (renamed from nova/virt/libvirt_conn.py)673
-rw-r--r--nova/virt/libvirt/firewall.py642
-rw-r--r--nova/virt/libvirt/netutils.py97
-rw-r--r--nova/virt/xenapi/vm_utils.py8
-rw-r--r--nova/virt/xenapi/vmops.py38
-rw-r--r--nova/virt/xenapi/volume_utils.py43
-rw-r--r--nova/virt/xenapi_conn.py172
-rw-r--r--nova/wsgi.py5
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/glance4
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost183
-rwxr-xr-xrun_tests.sh6
-rw-r--r--tools/install_venv.py172
-rw-r--r--tools/pip-requires3
113 files changed, 3968 insertions, 1688 deletions
diff --git a/.mailmap b/.mailmap
index 7e031fc7c..3f0238ee9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -29,6 +29,7 @@
<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
<matt.dietz@rackspace.com> <mdietz@openstack>
<mordred@inaugust.com> <mordred@hudson>
+<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
<nirmal.ranganathan@rackspace.com> <nirmal.ranganathan@rackspace.coom>
<paul@openstack.org> <paul.voccio@rackspace.com>
<paul@openstack.org> <pvoccio@castor.local>
@@ -36,6 +37,7 @@
<rlane@wikimedia.org> <laner@controller>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<soren.hansen@rackspace.com> <soren@linux2go.dk>
+<throughnothing@gmail.com> <will.wolf@rackspace.com>
<todd@ansolabs.com> <todd@lapex>
<todd@ansolabs.com> <todd@rubidine.com>
<tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
@@ -44,5 +46,4 @@
<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
-<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
diff --git a/Authors b/Authors
index 72eb0b6ae..6e45cc8bf 100644
--- a/Authors
+++ b/Authors
@@ -1,4 +1,5 @@
Alex Meade <alex.meade@rackspace.com>
+Andrey Brindeyev <abrindeyev@griddynamics.com>
Andy Smith <code@term.ie>
Andy Southgate <andy.southgate@citrix.com>
Anne Gentle <anne@openstack.org>
@@ -16,6 +17,7 @@ Christian Berendt <berendt@b1-systems.de>
Chuck Short <zulcss@ubuntu.com>
Cory Wright <corywright@gmail.com>
Dan Prince <dan.prince@rackspace.com>
+Dave Walker <DaveWalker@ubuntu.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
@@ -44,12 +46,14 @@ Josh Kearney <josh@jk0.org>
Josh Kleinpeter <josh@kleinpeter.org>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
+Justin Shepherd <jshepher@rackspace.com>
Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
+Lvov Maxim <usrleon@gmail.com>
Mark Washenberger <mark.washenberger@rackspace.com>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
@@ -63,6 +67,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
Naveed Massjouni <naveedm9@gmail.com>
Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
Paul Voccio <paul@openstack.org>
+Renuka Apte <renuka.apte@citrix.com>
Ricardo Carrillo Cruz <emaildericky@gmail.com>
Rick Clark <rick@openstack.org>
Rick Harris <rconradharris@gmail.com>
@@ -79,7 +84,7 @@ Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
-William Wolf <will.wolf@rackspace.com>
+William Wolf <throughnothing@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Yuriy Taraday <yorik.sar@gmail.com>
diff --git a/MANIFEST.in b/MANIFEST.in
index e7a6e7da4..4e145de75 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -35,6 +35,7 @@ include nova/tests/bundle/1mb.manifest.xml
include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
include nova/tests/bundle/1mb.part.0
include nova/tests/bundle/1mb.part.1
+include nova/tests/public_key/*
include nova/tests/db/nova.austin.sqlite
include plugins/xenapi/README
include plugins/xenapi/etc/xapi.d/plugins/objectstore
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index f42dfd6b5..5926b97de 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -108,6 +108,13 @@ def main():
interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
if int(os.environ.get('TESTING', '0')):
from nova.tests import fake_flags
+
+ #if FLAGS.fake_rabbit:
+ # LOG.debug(_("leasing ip"))
+ # network_manager = utils.import_object(FLAGS.network_manager)
+ ## reload(fake_flags)
+ # from nova.tests import fake_flags
+
action = argv[1]
if action in ['add', 'del', 'old']:
mac = argv[2]
diff --git a/bin/nova-manage b/bin/nova-manage
index 2f6af6e2d..26c0d776c 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -97,7 +97,7 @@ flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DECLARE('images_path', 'nova.image.local')
-flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn')
+flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
@@ -362,27 +362,47 @@ class ProjectCommands(object):
def add(self, project_id, user_id):
"""Adds user to project
arguments: project_id user_id"""
- self.manager.add_to_project(user_id, project_id)
+ try:
+ self.manager.add_to_project(user_id, project_id)
+ except exception.UserNotFound as ex:
+ print ex
+ raise
def create(self, name, project_manager, description=None):
"""Creates a new project
arguments: name project_manager [description]"""
- self.manager.create_project(name, project_manager, description)
+ try:
+ self.manager.create_project(name, project_manager, description)
+ except exception.UserNotFound as ex:
+ print ex
+ raise
def modify(self, name, project_manager, description=None):
"""Modifies a project
arguments: name project_manager [description]"""
- self.manager.modify_project(name, project_manager, description)
+ try:
+ self.manager.modify_project(name, project_manager, description)
+ except exception.UserNotFound as ex:
+ print ex
+ raise
def delete(self, name):
"""Deletes an existing project
arguments: name"""
- self.manager.delete_project(name)
+ try:
+ self.manager.delete_project(name)
+ except exception.ProjectNotFound as ex:
+ print ex
+ raise
def environment(self, project_id, user_id, filename='novarc'):
"""Exports environment variables to an sourcable file
arguments: project_id user_id [filename='novarc]"""
- rc = self.manager.get_environment_rc(user_id, project_id)
+ try:
+ rc = self.manager.get_environment_rc(user_id, project_id)
+ except (exception.UserNotFound, exception.ProjectNotFound) as ex:
+ print ex
+ raise
with open(filename, 'w') as f:
f.write(rc)
@@ -397,19 +417,26 @@ class ProjectCommands(object):
arguments: project_id [key] [value]"""
ctxt = context.get_admin_context()
if key:
- quo = {'project_id': project_id, key: value}
+ if value.lower() == 'unlimited':
+ value = None
try:
- db.quota_update(ctxt, project_id, quo)
- except exception.NotFound:
- db.quota_create(ctxt, quo)
- project_quota = quota.get_quota(ctxt, project_id)
+ db.quota_update(ctxt, project_id, key, value)
+ except exception.ProjectQuotaNotFound:
+ db.quota_create(ctxt, project_id, key, value)
+ project_quota = quota.get_project_quotas(ctxt, project_id)
for key, value in project_quota.iteritems():
+ if value is None:
+ value = 'unlimited'
print '%s: %s' % (key, value)
def remove(self, project_id, user_id):
"""Removes user from project
arguments: project_id user_id"""
- self.manager.remove_from_project(user_id, project_id)
+ try:
+ self.manager.remove_from_project(user_id, project_id)
+ except (exception.UserNotFound, exception.ProjectNotFound) as ex:
+ print ex
+ raise
def scrub(self, project_id):
"""Deletes data associated with project
@@ -428,6 +455,9 @@ class ProjectCommands(object):
zip_file = self.manager.get_credentials(user_id, project_id)
with open(filename, 'w') as f:
f.write(zip_file)
+ except (exception.UserNotFound, exception.ProjectNotFound) as ex:
+ print ex
+ raise
except db.api.NoMoreNetworks:
print _('No more networks available. If this is a new '
'installation, you need\nto call something like this:\n\n'
@@ -523,8 +553,10 @@ class NetworkCommands(object):
[network_size=FLAG], [vlan_start=FLAG],
[vpn_start=FLAG], [fixed_range_v6=FLAG]"""
if not fixed_range:
- raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is '
- 'required to create networks.'))
+ msg = _('Fixed range in the form of 10.0.0.0/8 is '
+ 'required to create networks.')
+ print msg
+ raise TypeError(msg)
if not num_networks:
num_networks = FLAGS.num_networks
if not network_size:
@@ -536,14 +568,18 @@ class NetworkCommands(object):
if not fixed_range_v6:
fixed_range_v6 = FLAGS.fixed_range_v6
net_manager = utils.import_object(FLAGS.network_manager)
- net_manager.create_networks(context.get_admin_context(),
- cidr=fixed_range,
- num_networks=int(num_networks),
- network_size=int(network_size),
- vlan_start=int(vlan_start),
- vpn_start=int(vpn_start),
- cidr_v6=fixed_range_v6,
- label=label)
+ try:
+ net_manager.create_networks(context.get_admin_context(),
+ cidr=fixed_range,
+ num_networks=int(num_networks),
+ network_size=int(network_size),
+ vlan_start=int(vlan_start),
+ vpn_start=int(vpn_start),
+ cidr_v6=fixed_range_v6,
+ label=label)
+ except ValueError, e:
+ print e
+ raise e
def list(self):
"""List all created networks"""
@@ -972,7 +1008,7 @@ class ImageCommands(object):
try:
internal_id = ec2utils.ec2_id_to_id(old_image_id)
image = self.image_service.show(context, internal_id)
- except exception.NotFound:
+ except (exception.InvalidEc2Id, exception.ImageNotFound):
image = self.image_service.show_by_name(context, old_image_id)
return image['id']
diff --git a/bin/stack b/bin/stack
index d84a82e27..a1c6d1348 100755
--- a/bin/stack
+++ b/bin/stack
@@ -65,7 +65,7 @@ def format_help(d):
indent = MAX_INDENT - 6
out = []
- for k, v in d.iteritems():
+ for k, v in sorted(d.iteritems()):
if (len(k) + 6) > MAX_INDENT:
out.extend([' %s' % k])
initial_indent = ' ' * (indent + 6)
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index 9613ba990..0a5a7a4d6 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -35,6 +35,7 @@ Programming Concepts
.. toctree::
:maxdepth: 3
+ zone
rabbit
API Reference
diff --git a/doc/source/devref/zone.rst b/doc/source/devref/zone.rst
index 3dd9d37d3..263560ee2 100644
--- a/doc/source/devref/zone.rst
+++ b/doc/source/devref/zone.rst
@@ -17,7 +17,7 @@
Zones
=====
-A Nova deployment is called a Zone. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution.
+A Nova deployment is called a Zone. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers.
The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion.
@@ -34,7 +34,7 @@ Routing between Zones is based on the Capabilities of that Zone. Capabilities ar
key=value;value;value, key=value;value;value
-Zones have Capabilities which are general to the Zone and are set via `--zone-capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag.
+Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag.
Flow within a Zone
------------------
@@ -47,7 +47,7 @@ Inter-service communication within a Zone is done with RabbitMQ. Each class of S
These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing.
-The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone-name` flag (and defaults to "nova").
+The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova").
Zone administrative functions
-----------------------------
diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst
index 9c54f3608..397cc8e80 100644
--- a/doc/source/man/novamanage.rst
+++ b/doc/source/man/novamanage.rst
@@ -6,7 +6,7 @@ nova-manage
control and manage cloud computer instances and images
------------------------------------------------------
-:Author: nova@lists.launchpad.net
+:Author: openstack@lists.launchpad.net
:Date: 2010-11-16
:Copyright: OpenStack LLC
:Version: 0.1
@@ -121,7 +121,7 @@ Nova Role
nova-manage role <action> [<argument>]
``nova-manage role add <username> <rolename> <(optional) projectname>``
- Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.
+ Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: cloudadmin, itsec, sysadmin, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects.
``nova-manage role has <username> <projectname>``
Checks the user or project and responds with True if the user has a global role with a particular project.
diff --git a/doc/source/runnova/managing.users.rst b/doc/source/runnova/managing.users.rst
index 392142e86..d3442bed9 100644
--- a/doc/source/runnova/managing.users.rst
+++ b/doc/source/runnova/managing.users.rst
@@ -38,11 +38,11 @@ Role-based access control (RBAC) is an approach to restricting system access to
Nova’s rights management system employs the RBAC model and currently supports the following five roles:
-* **Cloud Administrator.** (admin) Users of this class enjoy complete system access.
+* **Cloud Administrator.** (cloudadmin) Users of this class enjoy complete system access.
* **IT Security.** (itsec) This role is limited to IT security personnel. It permits role holders to quarantine instances.
-* **Project Manager.** (projectmanager)The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances.
+* **System Administrator.** (sysadmin) The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances.
* **Network Administrator.** (netadmin) Users with this role are permitted to allocate and assign publicly accessible IP addresses as well as create and modify firewall rules.
-* **Developer.** This is a general purpose role that is assigned to users by default.
+* **Developer.** (developer) This is a general purpose role that is assigned to users by default.
RBAC management is exposed through the dashboard for simplified user management.
diff --git a/nova/CA/openssl.cnf.tmpl b/nova/CA/openssl.cnf.tmpl
index b80fadf40..f87d9f3b2 100644
--- a/nova/CA/openssl.cnf.tmpl
+++ b/nova/CA/openssl.cnf.tmpl
@@ -46,7 +46,7 @@ policy = policy_match
# RHEL 6 and Fedora 14 (using openssl-1.0.0-4.el6.x86_64 or
# openssl-1.0.0d-1.fc14.x86_64)
[ policy_match ]
-countryName = match
+countryName = supplied
stateOrProvinceName = supplied
organizationName = optional
organizationalUnitName = optional
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index cd59340bd..c13993dd3 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -338,6 +338,10 @@ class Executor(wsgi.Application):
else:
return self._error(req, context, type(ex).__name__,
unicode(ex))
+ except exception.KeyPairExists as ex:
+ LOG.debug(_('KeyPairExists raised: %s'), unicode(ex),
+ context=context)
+ return self._error(req, context, type(ex).__name__, unicode(ex))
except Exception as ex:
extra = {'environment': req.environ}
LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 092b80fa2..c35b6024e 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -27,6 +27,8 @@ import datetime
import IPy
import os
import urllib
+import tempfile
+import shutil
from nova import compute
from nova import context
@@ -35,6 +37,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import ipv6
from nova import log as logging
from nova import network
from nova import utils
@@ -315,6 +318,27 @@ class CloudController(object):
'keyMaterial': data['private_key']}
# TODO(vish): when context is no longer an object, pass it here
+ def import_public_key(self, context, key_name, public_key,
+ fingerprint=None):
+ LOG.audit(_("Import key %s"), key_name, context=context)
+ key = {}
+ key['user_id'] = context.user_id
+ key['name'] = key_name
+ key['public_key'] = public_key
+ if fingerprint is None:
+ tmpdir = tempfile.mkdtemp()
+ pubfile = os.path.join(tmpdir, 'temp.pub')
+ fh = open(pubfile, 'w')
+ fh.write(public_key)
+ fh.close()
+ (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f',
+ '%s' % (pubfile))
+ fingerprint = out.split(' ')[1]
+ shutil.rmtree(tmpdir)
+ key['fingerprint'] = fingerprint
+ db.key_pair_create(context, key)
+ return True
+
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
@@ -718,9 +742,10 @@ class CloudController(object):
fixed = instance['fixed_ip']
floating_addr = fixed['floating_ips'][0]['address']
if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
- i['dnsNameV6'] = utils.to_global_ipv6(
+ i['dnsNameV6'] = ipv6.to_global(
instance['fixed_ip']['network']['cidr_v6'],
- instance['mac_address'])
+ instance['mac_address'],
+ instance['project_id'])
i['privateDnsName'] = fixed_addr
i['privateIpAddress'] = fixed_addr
@@ -906,7 +931,7 @@ class CloudController(object):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
return self.image_service.show(context, internal_id)
- except ValueError:
+ except (exception.InvalidEc2Id, exception.ImageNotFound):
try:
return self.image_service.show_by_name(context, ec2_id)
except exception.NotFound:
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 1ac48163c..163aa4ed2 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -21,7 +21,10 @@ from nova import exception
def ec2_id_to_id(ec2_id):
"""Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
- return int(ec2_id.split('-')[-1], 16)
+ try:
+ return int(ec2_id.split('-')[-1], 16)
+ except ValueError:
+ raise exception.InvalidEc2Id(ec2_id=ec2_id)
def id_to_ec2_id(instance_id, template='i-%08x'):
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 348b70d5b..5b7f080ad 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -98,7 +98,8 @@ class APIRouter(wsgi.Router):
server_members['inject_network_info'] = 'POST'
mapper.resource("zone", "zones", controller=zones.Controller(),
- collection={'detail': 'GET', 'info': 'GET'}),
+ collection={'detail': 'GET', 'info': 'GET',
+ 'select': 'GET'})
mapper.resource("user", "users", controller=users.Controller(),
collection={'detail': 'GET'})
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 311e6bde9..6c6ee22a2 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -17,7 +17,6 @@
import datetime
import hashlib
-import json
import time
import webob.exc
@@ -25,11 +24,9 @@ import webob.dec
from nova import auth
from nova import context
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
-from nova import manager
from nova import utils
from nova import wsgi
from nova.api.openstack import faults
@@ -102,11 +99,11 @@ class AuthMiddleware(wsgi.Middleware):
token, user = self._authorize_user(username, key, req)
if user and token:
res = webob.Response()
- res.headers['X-Auth-Token'] = token.token_hash
+ res.headers['X-Auth-Token'] = token['token_hash']
res.headers['X-Server-Management-Url'] = \
- token.server_management_url
- res.headers['X-Storage-Url'] = token.storage_url
- res.headers['X-CDN-Management-Url'] = token.cdn_management_url
+ token['server_management_url']
+ res.headers['X-Storage-Url'] = token['storage_url']
+ res.headers['X-CDN-Management-Url'] = token['cdn_management_url']
res.content_type = 'text/plain'
res.status = '204'
LOG.debug(_("Successfully authenticated '%s'") % username)
@@ -130,11 +127,11 @@ class AuthMiddleware(wsgi.Middleware):
except exception.NotFound:
return None
if token:
- delta = datetime.datetime.now() - token.created_at
+ delta = datetime.datetime.utcnow() - token['created_at']
if delta.days >= 2:
- self.db.auth_token_destroy(ctxt, token.token_hash)
+ self.db.auth_token_destroy(ctxt, token['token_hash'])
else:
- return self.auth.get_user(token.user_id)
+ return self.auth.get_user(token['user_id'])
return None
def _authorize_user(self, username, key, req):
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 7ea7afef6..8e77b25fb 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -105,15 +105,14 @@ class ExtensionDescriptor(object):
actions = []
return actions
- def get_response_extensions(self):
- """List of extensions.ResponseExtension extension objects.
+ def get_request_extensions(self):
+ """List of extensions.RequestException extension objects.
- Response extensions are used to insert information into existing
- response data.
+ Request extensions are used to handle custom request data.
"""
- response_exts = []
- return response_exts
+ request_exts = []
+ return request_exts
class ActionExtensionController(common.OpenstackController):
@@ -137,7 +136,7 @@ class ActionExtensionController(common.OpenstackController):
return res
-class ResponseExtensionController(common.OpenstackController):
+class RequestExtensionController(common.OpenstackController):
def __init__(self, application):
self.application = application
@@ -148,20 +147,9 @@ class ResponseExtensionController(common.OpenstackController):
def process(self, req, *args, **kwargs):
res = req.get_response(self.application)
- content_type = req.best_match_content_type()
- # currently response handlers are un-ordered
+ # currently request handlers are un-ordered
for handler in self.handlers:
- res = handler(res)
- try:
- body = res.body
- headers = res.headers
- except AttributeError:
- default_xmlns = None
- body = self._serialize(res, content_type, default_xmlns)
- headers = {"Content-Type": content_type}
- res = webob.Response()
- res.body = body
- res.headers = headers
+ res = handler(req, res)
return res
@@ -226,24 +214,24 @@ class ExtensionMiddleware(wsgi.Middleware):
return action_controllers
- def _response_ext_controllers(self, application, ext_mgr, mapper):
- """Returns a dict of ResponseExtensionController-s by collection."""
- response_ext_controllers = {}
- for resp_ext in ext_mgr.get_response_extensions():
- if not resp_ext.key in response_ext_controllers.keys():
- controller = ResponseExtensionController(application)
- mapper.connect(resp_ext.url_route + '.:(format)',
+ def _request_ext_controllers(self, application, ext_mgr, mapper):
+ """Returns a dict of RequestExtensionController-s by collection."""
+ request_ext_controllers = {}
+ for req_ext in ext_mgr.get_request_extensions():
+ if not req_ext.key in request_ext_controllers.keys():
+ controller = RequestExtensionController(application)
+ mapper.connect(req_ext.url_route + '.:(format)',
action='process',
controller=controller,
- conditions=resp_ext.conditions)
+ conditions=req_ext.conditions)
- mapper.connect(resp_ext.url_route,
+ mapper.connect(req_ext.url_route,
action='process',
controller=controller,
- conditions=resp_ext.conditions)
- response_ext_controllers[resp_ext.key] = controller
+ conditions=req_ext.conditions)
+ request_ext_controllers[req_ext.key] = controller
- return response_ext_controllers
+ return request_ext_controllers
def __init__(self, application, ext_mgr=None):
@@ -271,13 +259,13 @@ class ExtensionMiddleware(wsgi.Middleware):
controller = action_controllers[action.collection]
controller.add_action(action.action_name, action.handler)
- # extended responses
- resp_controllers = self._response_ext_controllers(application, ext_mgr,
+ # extended requests
+ req_controllers = self._request_ext_controllers(application, ext_mgr,
mapper)
- for response_ext in ext_mgr.get_response_extensions():
- LOG.debug(_('Extended response: %s'), response_ext.key)
- controller = resp_controllers[response_ext.key]
- controller.add_handler(response_ext.handler)
+ for request_ext in ext_mgr.get_request_extensions():
+ LOG.debug(_('Extended request: %s'), request_ext.key)
+ controller = req_controllers[request_ext.key]
+ controller.add_handler(request_ext.handler)
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
mapper)
@@ -347,17 +335,17 @@ class ExtensionManager(object):
pass
return actions
- def get_response_extensions(self):
- """Returns a list of ResponseExtension objects."""
- response_exts = []
+ def get_request_extensions(self):
+ """Returns a list of RequestExtension objects."""
+ request_exts = []
for alias, ext in self.extensions.iteritems():
try:
- response_exts.extend(ext.get_response_extensions())
+ request_exts.extend(ext.get_request_extensions())
except AttributeError:
- # NOTE(dprince): Extension aren't required to have response
+                # NOTE(dprince): Extensions aren't required to have request
# extensions
pass
- return response_exts
+ return request_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
@@ -421,9 +409,13 @@ class ExtensionManager(object):
self.extensions[alias] = ext
-class ResponseExtension(object):
- """Add data to responses from core nova OpenStack API controllers."""
+class RequestExtension(object):
+ """Extend requests and responses of core nova OpenStack API controllers.
+ Provide a way to add data to responses and handle custom request data
+ that is sent to core nova OpenStack API controllers.
+
+ """
def __init__(self, method, url_route, handler):
self.url_route = url_route
self.handler = handler
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index 40787bd17..4c5971cf6 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -45,6 +45,9 @@ class Controller(common.OpenstackController):
items = self._get_flavors(req, is_detail=True)
return dict(flavors=items)
+ def _get_view_builder(self, req):
+ raise NotImplementedError()
+
def _get_flavors(self, req, is_detail=True):
"""Helper function that returns a list of flavor dicts."""
ctxt = req.environ['nova.context']
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
index 47bc238f1..bd0250a7f 100644
--- a/nova/api/openstack/limits.py
+++ b/nova/api/openstack/limits.py
@@ -30,6 +30,7 @@ from collections import defaultdict
from webob.dec import wsgify
+from nova import quota
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
@@ -64,7 +65,8 @@ class LimitsController(common.OpenstackController):
"""
Return all global and rate limit information.
"""
- abs_limits = {}
+ context = req.environ['nova.context']
+ abs_limits = quota.get_project_quotas(context, context.project_id)
rate_limits = req.environ.get("nova.limits", [])
builder = self._get_view_builder(req)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 547310613..5c10fc916 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -75,6 +75,21 @@ class Controller(common.OpenstackController):
""" Returns a list of server details for a given user """
return self._items(req, is_detail=True)
+ def _image_id_from_req_data(self, data):
+ raise NotImplementedError()
+
+ def _flavor_id_from_req_data(self, data):
+ raise NotImplementedError()
+
+ def _get_view_builder(self, req):
+ raise NotImplementedError()
+
+ def _limit_items(self, items, req):
+ raise NotImplementedError()
+
+ def _action_rebuild(self, info, request, instance_id):
+ raise NotImplementedError()
+
def _items(self, req, is_detail):
"""Returns a list of servers for a given user.
@@ -165,7 +180,8 @@ class Controller(common.OpenstackController):
key_name=key_name,
key_data=key_data,
metadata=env['server'].get('metadata', {}),
- injected_files=injected_files)
+ injected_files=injected_files,
+ admin_password=password)
except quota.QuotaError as error:
self._handle_quota_error(error)
@@ -175,8 +191,6 @@ class Controller(common.OpenstackController):
builder = self._get_view_builder(req)
server = builder.build(inst, is_detail=True)
server['server']['adminPass'] = password
- self.compute_api.set_admin_password(context, server['server']['id'],
- password)
return server
def _deserialize_create(self, request):
@@ -593,8 +607,8 @@ class ControllerV10(Controller):
def _parse_update(self, context, server_id, inst_dict, update_dict):
if 'adminPass' in inst_dict['server']:
- update_dict['admin_pass'] = inst_dict['server']['adminPass']
- self.compute_api.set_admin_password(context, server_id)
+ self.compute_api.set_admin_password(context, server_id,
+ inst_dict['server']['adminPass'])
def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
@@ -743,8 +757,9 @@ class ServerCreateRequestXMLDeserializer(object):
"""Marshal the server attribute of a parsed request"""
server = {}
server_node = self._find_first_child_named(node, 'server')
- for attr in ["name", "imageId", "flavorId"]:
- server[attr] = server_node.getAttribute(attr)
+ for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]:
+ if server_node.getAttribute(attr):
+ server[attr] = server_node.getAttribute(attr)
metadata = self._extract_metadata(server_node)
if metadata is not None:
server["metadata"] = metadata
diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py
index 552db39ee..33be12c0c 100644
--- a/nova/api/openstack/views/limits.py
+++ b/nova/api/openstack/views/limits.py
@@ -23,6 +23,15 @@ from nova.api.openstack import common
class ViewBuilder(object):
"""Openstack API base limits view builder."""
+ def _build_rate_limits(self, rate_limits):
+ raise NotImplementedError()
+
+ def _build_rate_limit(self, rate_limit):
+ raise NotImplementedError()
+
+ def _build_absolute_limits(self, absolute_limit):
+ raise NotImplementedError()
+
def build(self, rate_limits, absolute_limits):
rate_limits = self._build_rate_limits(rate_limits)
absolute_limits = self._build_absolute_limits(absolute_limits)
@@ -36,6 +45,34 @@ class ViewBuilder(object):
return output
+ def _build_absolute_limits(self, absolute_limits):
+ """Builder for absolute limits
+
+ absolute_limits should be given as a dict of limits.
+ For example: {"ram": 512, "gigabytes": 1024}.
+
+ """
+ limit_names = {
+ "ram": ["maxTotalRAMSize"],
+ "instances": ["maxTotalInstances"],
+ "cores": ["maxTotalCores"],
+ "metadata_items": ["maxServerMeta", "maxImageMeta"],
+ "injected_files": ["maxPersonality"],
+ "injected_file_content_bytes": ["maxPersonalitySize"],
+ }
+ limits = {}
+ for name, value in absolute_limits.iteritems():
+ if name in limit_names and value is not None:
+ for name in limit_names[name]:
+ limits[name] = value
+ return limits
+
+ def _build_rate_limits(self, rate_limits):
+ raise NotImplementedError()
+
+ def _build_rate_limit(self, rate_limit):
+ raise NotImplementedError()
+
class ViewBuilderV10(ViewBuilder):
"""Openstack API v1.0 limits view builder."""
@@ -54,9 +91,6 @@ class ViewBuilderV10(ViewBuilder):
"resetTime": rate_limit["resetTime"],
}
- def _build_absolute_limits(self, absolute_limit):
- return {}
-
class ViewBuilderV11(ViewBuilder):
"""Openstack API v1.1 limits view builder."""
@@ -95,6 +129,3 @@ class ViewBuilderV11(ViewBuilder):
"unit": rate_limit["unit"],
"next-available": rate_limit["resetTime"],
}
-
- def _build_absolute_limits(self, absolute_limit):
- return {}
diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py
index 227ffecdc..af73d8f6d 100644
--- a/nova/api/openstack/zones.py
+++ b/nova/api/openstack/zones.py
@@ -13,7 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
+import urlparse
+
+from nova import crypto
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova.api.openstack import common
@@ -21,6 +26,12 @@ from nova.scheduler import api
FLAGS = flags.FLAGS
+flags.DEFINE_string('build_plan_encryption_key',
+ None,
+ '128bit (hex) encryption key for scheduler build plans.')
+
+
+LOG = logging.getLogger('nova.api.openstack.zones')
def _filter_keys(item, keys):
@@ -97,3 +108,35 @@ class Controller(common.OpenstackController):
zone_id = int(id)
zone = api.zone_update(context, zone_id, env["zone"])
return dict(zone=_scrub_zone(zone))
+
+ def select(self, req):
+ """Returns a weighted list of costs to create instances
+ of desired capabilities."""
+ ctx = req.environ['nova.context']
+ qs = req.environ['QUERY_STRING']
+ param_dict = urlparse.parse_qs(qs)
+ param_dict.pop("fresh", None)
+ # parse_qs returns a dict where the values are lists,
+ # since query strings can have multiple values for the
+ # same key. We need to convert that to single values.
+ for key in param_dict:
+ param_dict[key] = param_dict[key][0]
+ build_plan = api.select(ctx, specs=param_dict)
+ cooked = self._scrub_build_plan(build_plan)
+ return {"weights": cooked}
+
+ def _scrub_build_plan(self, build_plan):
+ """Remove all the confidential data and return a sanitized
+ version of the build plan. Include an encrypted full version
+ of the weighting entry so we can get back to it later."""
+ if not FLAGS.build_plan_encryption_key:
+ raise exception.FlagNotSet(flag='build_plan_encryption_key')
+
+ encryptor = crypto.encryptor(FLAGS.build_plan_encryption_key)
+ cooked = []
+ for entry in build_plan:
+ json_entry = json.dumps(entry)
+ cipher_text = encryptor(json_entry)
+ cooked.append(dict(weight=entry['weight'],
+ blob=cipher_text))
+ return cooked
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 971c0732f..1a406bea8 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -19,6 +19,7 @@
"""Handles all requests relating to instances (guest vms)."""
import datetime
+import eventlet
import re
import time
@@ -42,6 +43,8 @@ LOG = logging.getLogger('nova.compute.api')
FLAGS = flags.FLAGS
flags.DECLARE('vncproxy_topic', 'nova.vnc')
+flags.DEFINE_integer('find_host_timeout', 30,
+ 'Timeout after NN seconds when looking for a host.')
def generate_default_hostname(instance_id):
@@ -92,14 +95,15 @@ class API(base.Base):
"""
if injected_files is None:
return
- limit = quota.allowed_injected_files(context)
+ limit = quota.allowed_injected_files(context, len(injected_files))
if len(injected_files) > limit:
raise quota.QuotaError(code="OnsetFileLimitExceeded")
path_limit = quota.allowed_injected_file_path_bytes(context)
- content_limit = quota.allowed_injected_file_content_bytes(context)
for path, content in injected_files:
if len(path) > path_limit:
raise quota.QuotaError(code="OnsetFilePathLimitExceeded")
+ content_limit = quota.allowed_injected_file_content_bytes(
+ context, len(content))
if len(content) > content_limit:
raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
@@ -131,7 +135,8 @@ class API(base.Base):
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
- injected_files=None):
+ injected_files=None,
+ admin_password=None):
"""Create the number and type of instances requested.
Verifies that quota and other arguments are valid.
@@ -146,9 +151,13 @@ class API(base.Base):
pid = context.project_id
LOG.warn(_("Quota exceeeded for %(pid)s,"
" tried to run %(min_count)s instances") % locals())
- raise quota.QuotaError(_("Instance quota exceeded. You can only "
- "run %s more instances of this type.") %
- num_instances, "InstanceLimitExceeded")
+ if num_instances <= 0:
+ message = _("Instance quota exceeded. You cannot run any "
+ "more instances of this type.")
+ else:
+ message = _("Instance quota exceeded. You can only run %s "
+ "more instances of this type.") % num_instances
+ raise quota.QuotaError(message, "InstanceLimitExceeded")
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, injected_files)
@@ -248,13 +257,21 @@ class API(base.Base):
uid = context.user_id
LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
" instance %(instance_id)s") % locals())
+
+ # NOTE(sandy): For now we're just going to pass in the
+ # instance_type record to the scheduler. In a later phase
+ # we'll be ripping this whole for-loop out and deferring the
+ # creation of the Instance record. At that point all this will
+ # change.
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "run_instance",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id,
+ "instance_type": instance_type,
"availability_zone": availability_zone,
- "injected_files": injected_files}})
+ "injected_files": injected_files,
+ "admin_password": admin_password}})
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
@@ -484,7 +501,7 @@ class API(base.Base):
def _find_host(self, context, instance_id):
"""Find the host associated with an instance."""
- for attempts in xrange(10):
+ for attempts in xrange(FLAGS.find_host_timeout):
instance = self.get(context, instance_id)
host = instance["host"]
if host:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index ae5b50ef3..d1e01f275 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -77,7 +77,8 @@ flags.DEFINE_integer("rescue_timeout", 0,
" Set to 0 to disable.")
flags.DEFINE_bool('auto_assign_floating_ip', False,
'Autoassigning floating ip to VM')
-
+flags.DEFINE_integer('host_state_interval', 120,
+ 'Interval in seconds for querying the host status')
LOG = logging.getLogger('nova.compute.manager')
@@ -131,6 +132,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
self.network_api = network.API()
+ self._last_host_check = 0
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
@@ -219,6 +221,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
instance_ref.injected_files = kwargs.get('injected_files', [])
+ instance_ref.admin_pass = kwargs.get('admin_password', None)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
@@ -403,31 +406,49 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
- """Set the root/admin password for an instance on this host."""
+ """Set the root/admin password for an instance on this host.
+
+ This is generally only called by API password resets after an
+ image has been built.
+ """
+
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password(FLAGS.password_length)
- while True:
+ max_tries = 10
+
+ for i in xrange(max_tries):
instance_ref = self.db.instance_get(context, instance_id)
instance_id = instance_ref["id"]
instance_state = instance_ref["state"]
expected_state = power_state.RUNNING
if instance_state != expected_state:
- time.sleep(5)
- continue
+ raise exception.Error(_('Instance is not running'))
else:
try:
self.driver.set_admin_password(instance_ref, new_pass)
LOG.audit(_("Instance %s: Root password set"),
instance_ref["name"])
break
+ except NotImplementedError:
+ # NOTE(dprince): if the driver doesn't implement
+ # set_admin_password we break to avoid a loop
+ LOG.warn(_('set_admin_password is not implemented '
+ 'by this driver.'))
+ break
except Exception, e:
# Catch all here because this could be anything.
LOG.exception(e)
+ if i == max_tries - 1:
+ # At some point this exception may make it back
+ # to the API caller, and we don't want to reveal
+ # too much. The real exception is logged above
+ raise exception.Error(_('Internal error'))
+ time.sleep(1)
continue
@exception.wrap_exception
@@ -620,7 +641,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_type = self.db.instance_type_get_by_flavor_id(context,
migration_ref['new_flavor_id'])
self.db.instance_update(context, instance_id,
- dict(instance_type=instance_type['name'],
+ dict(instance_type_id=instance_type['id'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
local_gb=instance_type['local_gb']))
@@ -1094,6 +1115,13 @@ class ComputeManager(manager.SchedulerDependentManager):
error_list.append(ex)
try:
+ self._report_driver_status()
+ except Exception as ex:
+ LOG.warning(_("Error during report_driver_status(): %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ try:
self._poll_instance_states(context)
except Exception as ex:
LOG.warning(_("Error during instance poll: %s"),
@@ -1102,6 +1130,16 @@ class ComputeManager(manager.SchedulerDependentManager):
return error_list
+ def _report_driver_status(self):
+ curr_time = time.time()
+ if curr_time - self._last_host_check > FLAGS.host_state_interval:
+ self._last_host_check = curr_time
+ LOG.info(_("Updating host status"))
+ # This will grab info about the host and queue it
+ # to be sent to the Schedulers.
+ self.update_service_capabilities(
+ self.driver.get_host_stats(refresh=True))
+
def _poll_instance_states(self, context):
vm_instances = self.driver.list_instances_detail()
vm_instances = dict((vm.name, vm) for vm in vm_instances)
diff --git a/nova/crypto.py b/nova/crypto.py
index 14b9cbef6..bdc32482a 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -332,6 +332,51 @@ def mkcacert(subject='nova', years=1):
return cert, pk, pkey
+def _build_cipher(key, iv, encode=True):
+ """Make a 128bit AES CBC encode/decode Cipher object.
+ Padding is handled internally."""
+ operation = 1 if encode else 0
+ return M2Crypto.EVP.Cipher(alg='aes_128_cbc', key=key, iv=iv, op=operation)
+
+
+def encryptor(key, iv=None):
+ """Simple symmetric key encryption."""
+ key = base64.b64decode(key)
+ if iv is None:
+ iv = '\0' * 16
+ else:
+ iv = base64.b64decode(iv)
+
+ def encrypt(data):
+ cipher = _build_cipher(key, iv, encode=True)
+ v = cipher.update(data)
+ v = v + cipher.final()
+ del cipher
+ v = base64.b64encode(v)
+ return v
+
+ return encrypt
+
+
+def decryptor(key, iv=None):
+ """Simple symmetric key decryption."""
+ key = base64.b64decode(key)
+ if iv is None:
+ iv = '\0' * 16
+ else:
+ iv = base64.b64decode(iv)
+
+ def decrypt(data):
+ data = base64.b64decode(data)
+ cipher = _build_cipher(key, iv, encode=False)
+ v = cipher.update(data)
+ v = v + cipher.final()
+ del cipher
+ return v
+
+ return decrypt
+
+
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/nova/db/api.py b/nova/db/api.py
index f9a4b5b4b..310c0bb09 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -403,7 +403,7 @@ def instance_create(context, values):
def instance_data_get_for_project(context, project_id):
- """Get (instance_count, core_count) for project."""
+ """Get (instance_count, total_cores, total_ram) for project."""
return IMPL.instance_data_get_for_project(context, project_id)
@@ -756,24 +756,34 @@ def auth_token_create(context, token):
###################
-def quota_create(context, values):
- """Create a quota from the values dictionary."""
- return IMPL.quota_create(context, values)
+def quota_create(context, project_id, resource, limit):
+ """Create a quota for the given project and resource."""
+ return IMPL.quota_create(context, project_id, resource, limit)
-def quota_get(context, project_id):
+def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
- return IMPL.quota_get(context, project_id)
+ return IMPL.quota_get(context, project_id, resource)
-def quota_update(context, project_id, values):
- """Update a quota from the values dictionary."""
- return IMPL.quota_update(context, project_id, values)
+def quota_get_all_by_project(context, project_id):
+ """Retrieve all quotas associated with a given project."""
+ return IMPL.quota_get_all_by_project(context, project_id)
-def quota_destroy(context, project_id):
+def quota_update(context, project_id, resource, limit):
+ """Update a quota or raise if it does not exist."""
+ return IMPL.quota_update(context, project_id, resource, limit)
+
+
+def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
- return IMPL.quota_destroy(context, project_id)
+ return IMPL.quota_destroy(context, project_id, resource)
+
+
+def quota_destroy_all_by_project(context, project_id):
+    """Destroy all quotas associated with a given project."""
+    return IMPL.quota_destroy_all_by_project(context, project_id)
###################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 285b22a04..e4dda5c12 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -25,6 +25,7 @@ import warnings
from nova import db
from nova import exception
from nova import flags
+from nova import ipv6
from nova import utils
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
@@ -744,7 +745,7 @@ def fixed_ip_get_all_by_instance(context, instance_id):
@require_context
def fixed_ip_get_instance_v6(context, address):
session = get_session()
- mac = utils.to_mac(address)
+ mac = ipv6.to_mac(address)
result = session.query(models.Instance).\
filter_by(mac_address=mac).\
@@ -802,12 +803,13 @@ def instance_create(context, values):
def instance_data_get_for_project(context, project_id):
session = get_session()
result = session.query(func.count(models.Instance.id),
- func.sum(models.Instance.vcpus)).\
+ func.sum(models.Instance.vcpus),
+ func.sum(models.Instance.memory_mb)).\
filter_by(project_id=project_id).\
filter_by(deleted=False).\
first()
# NOTE(vish): convert None to 0
- return (result[0] or 0, result[1] or 0)
+ return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
@@ -872,6 +874,7 @@ def instance_get_all(context):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -884,6 +887,7 @@ def instance_get_all_by_user(context, user_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(user_id=user_id).\
@@ -974,7 +978,8 @@ def instance_get_fixed_address_v6(context, instance_id):
network_ref = network_get_by_instance(context, instance_id)
prefix = network_ref.cidr_v6
mac = instance_ref.mac_address
- return utils.to_global_ipv6(prefix, mac)
+ project_id = instance_ref.project_id
+ return ipv6.to_global(prefix, mac, project_id)
@require_context
@@ -1495,46 +1500,72 @@ def auth_token_create(_context, token):
###################
-@require_admin_context
-def quota_get(context, project_id, session=None):
+@require_context
+def quota_get(context, project_id, resource, session=None):
if not session:
session = get_session()
-
result = session.query(models.Quota).\
filter_by(project_id=project_id).\
- filter_by(deleted=can_read_deleted(context)).\
+ filter_by(resource=resource).\
+ filter_by(deleted=False).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
+ return result
+
+@require_context
+def quota_get_all_by_project(context, project_id):
+ session = get_session()
+ result = {'project_id': project_id}
+ rows = session.query(models.Quota).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ all()
+ for row in rows:
+ result[row.resource] = row.hard_limit
return result
@require_admin_context
-def quota_create(context, values):
+def quota_create(context, project_id, resource, limit):
quota_ref = models.Quota()
- quota_ref.update(values)
+ quota_ref.project_id = project_id
+ quota_ref.resource = resource
+ quota_ref.hard_limit = limit
quota_ref.save()
return quota_ref
@require_admin_context
-def quota_update(context, project_id, values):
+def quota_update(context, project_id, resource, limit):
session = get_session()
with session.begin():
- quota_ref = quota_get(context, project_id, session=session)
- quota_ref.update(values)
+ quota_ref = quota_get(context, project_id, resource, session=session)
+ quota_ref.hard_limit = limit
quota_ref.save(session=session)
@require_admin_context
-def quota_destroy(context, project_id):
+def quota_destroy(context, project_id, resource):
session = get_session()
with session.begin():
- quota_ref = quota_get(context, project_id, session=session)
+ quota_ref = quota_get(context, project_id, resource, session=session)
quota_ref.delete(session=session)
+@require_admin_context
+def quota_destroy_all_by_project(context, project_id):
+ session = get_session()
+ with session.begin():
+ quotas = session.query(models.Quota).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ all()
+ for quota_ref in quotas:
+ quota_ref.delete(session=session)
+
+
###################
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
index 9e7ab3554..63bbaccc1 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
@@ -17,15 +17,13 @@
# under the License.
## Table code mostly autogenerated by genmodel.py
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey
+from sqlalchemy import ForeignKeyConstraint, Integer, MetaData, String
+from sqlalchemy import Table, Text
from nova import log as logging
-
meta = MetaData()
-
auth_tokens = Table('auth_tokens', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
index 413536a59..9bb8a8ada 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
@@ -16,15 +16,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey
+from sqlalchemy import Integer, MetaData, String, Table, Text
from nova import log as logging
-
meta = MetaData()
-
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
index 5ba7910f1..8e0de4d2b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
@@ -15,15 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
-
networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
index ade981687..0abea374c 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
@@ -13,15 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
from nova import log as logging
-
meta = MetaData()
-
#
# New Tables
#
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
index 4cb07e0d8..a1a86e3b4 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
@@ -15,15 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
from nova import log as logging
-
meta = MetaData()
-
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
index 705fc8ff3..4627d3332 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
@@ -15,11 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
index 427934d53..6f2668040 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
@@ -13,15 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
-
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
index 5e2cb69d9..63999f6ff 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
@@ -13,15 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import api
-from nova import db
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
from nova import log as logging
-import datetime
-
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
index 4fda525f1..0f2d0079a 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
@@ -15,12 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
from nova import log as logging
-
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
index eb3066894..a5b80586e 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
@@ -14,12 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from sqlalchemy.sql import text
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
index 23ccccb4e..b2b0256d2 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
@@ -16,10 +16,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from migrate import *
+from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData
+from sqlalchemy import Table, Text
from nova import log as logging
-from sqlalchemy import *
-
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
index e87085668..10d250522 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
@@ -13,15 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
meta = MetaData()
-
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
index 3fb92e85c..7246839b7 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
@@ -15,11 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, Table
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
index 334d1f255..62216be12 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
@@ -14,16 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from sqlalchemy.sql import text
-from migrate import *
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
#from nova import log as logging
-
meta = MetaData()
-
c_instance_type = Column('instance_type',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
index 29b26b3dd..375760c84 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
@@ -15,14 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from sqlalchemy.sql import text
-from migrate import *
-
+from sqlalchemy import Boolean, Column, MetaData, Table
meta = MetaData()
-
c_auto_assigned = Column('auto_assigned', Boolean, default=False)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
new file mode 100644
index 000000000..a2d8192ca
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
@@ -0,0 +1,203 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+
+import datetime
+
+meta = MetaData()
+
+resources = [
+ 'instances',
+ 'cores',
+ 'volumes',
+ 'gigabytes',
+ 'floating_ips',
+ 'metadata_items',
+]
+
+
+def old_style_quotas_table(name):
+ return Table(name, meta,
+ Column('id', Integer(), primary_key=True),
+ Column('created_at', DateTime(),
+ default=datetime.datetime.utcnow),
+ Column('updated_at', DateTime(),
+ onupdate=datetime.datetime.utcnow),
+ Column('deleted_at', DateTime()),
+ Column('deleted', Boolean(), default=False),
+ Column('project_id',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('instances', Integer()),
+ Column('cores', Integer()),
+ Column('volumes', Integer()),
+ Column('gigabytes', Integer()),
+ Column('floating_ips', Integer()),
+ Column('metadata_items', Integer()),
+ )
+
+
+def new_style_quotas_table(name):
+ return Table(name, meta,
+ Column('id', Integer(), primary_key=True),
+ Column('created_at', DateTime(),
+ default=datetime.datetime.utcnow),
+ Column('updated_at', DateTime(),
+ onupdate=datetime.datetime.utcnow),
+ Column('deleted_at', DateTime()),
+ Column('deleted', Boolean(), default=False),
+ Column('project_id',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('resource',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=False),
+ Column('hard_limit', Integer(), nullable=True),
+ )
+
+
+def existing_quotas_table(migrate_engine):
+ return Table('quotas', meta, autoload=True, autoload_with=migrate_engine)
+
+
+def _assert_no_duplicate_project_ids(quotas):
+ project_ids = set()
+ message = ('There are multiple active quotas for project "%s" '
+ '(among others, possibly). '
+ 'Please resolve all ambiguous quotas before '
+ 'reattempting the migration.')
+ for quota in quotas:
+ assert quota.project_id not in project_ids, message % quota.project_id
+ project_ids.add(quota.project_id)
+
+
+def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
+ """Ensure that there are no duplicate non-deleted quota entries."""
+ select = quotas.select().where(quotas.c.deleted == False)
+ results = migrate_engine.execute(select)
+ _assert_no_duplicate_project_ids(list(results))
+
+
+def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas):
+ """Ensure that there are no duplicate non-deleted quota entries."""
+ for resource in resources:
+ select = quotas.select().\
+ where(quotas.c.deleted == False).\
+ where(quotas.c.resource == resource)
+ results = migrate_engine.execute(select)
+ _assert_no_duplicate_project_ids(list(results))
+
+
+def convert_forward(migrate_engine, old_quotas, new_quotas):
+ quotas = list(migrate_engine.execute(old_quotas.select()))
+ for quota in quotas:
+ for resource in resources:
+ hard_limit = getattr(quota, resource)
+ if hard_limit is None:
+ continue
+ insert = new_quotas.insert().values(
+ created_at=quota.created_at,
+ updated_at=quota.updated_at,
+ deleted_at=quota.deleted_at,
+ deleted=quota.deleted,
+ project_id=quota.project_id,
+ resource=resource,
+ hard_limit=hard_limit)
+ migrate_engine.execute(insert)
+
+
+def earliest(date1, date2):
+ if date1 is None and date2 is None:
+ return None
+ if date1 is None:
+ return date2
+ if date2 is None:
+ return date1
+ if date1 < date2:
+ return date1
+ return date2
+
+
+def latest(date1, date2):
+ if date1 is None and date2 is None:
+ return None
+ if date1 is None:
+ return date2
+ if date2 is None:
+ return date1
+ if date1 > date2:
+ return date1
+ return date2
+
+
+def convert_backward(migrate_engine, old_quotas, new_quotas):
+ quotas = {}
+ for quota in migrate_engine.execute(new_quotas.select()):
+ if (quota.resource not in resources
+ or quota.hard_limit is None or quota.deleted):
+ continue
+ if not quota.project_id in quotas:
+ quotas[quota.project_id] = {
+ 'project_id': quota.project_id,
+ 'created_at': quota.created_at,
+ 'updated_at': quota.updated_at,
+ quota.resource: quota.hard_limit
+ }
+ else:
+ quotas[quota.project_id]['created_at'] = earliest(
+ quota.created_at, quotas[quota.project_id]['created_at'])
+ quotas[quota.project_id]['updated_at'] = latest(
+ quota.updated_at, quotas[quota.project_id]['updated_at'])
+ quotas[quota.project_id][quota.resource] = quota.hard_limit
+
+ for quota in quotas.itervalues():
+ insert = old_quotas.insert().values(**quota)
+ migrate_engine.execute(insert)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ old_quotas = existing_quotas_table(migrate_engine)
+ assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)
+
+ new_quotas = new_style_quotas_table('quotas_new')
+ new_quotas.create()
+ convert_forward(migrate_engine, old_quotas, new_quotas)
+ old_quotas.drop()
+ new_quotas.rename('quotas')
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+ meta.bind = migrate_engine
+
+ new_quotas = existing_quotas_table(migrate_engine)
+ assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)
+
+ old_quotas = old_style_quotas_table('quotas_old')
+ old_quotas.create()
+ convert_backward(migrate_engine, old_quotas, new_quotas)
+ new_quotas.drop()
+ old_quotas.rename('quotas')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py
new file mode 100644
index 000000000..cda890c94
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py
@@ -0,0 +1,68 @@
+from sqlalchemy import Column, Integer, MetaData, String, Table
+from nova import log as logging
+
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ types = {}
+ for instance in migrate_engine.execute(instances.select()):
+ if instance.instance_type_id is None:
+ types[instance.id] = None
+ continue
+ try:
+ types[instance.id] = int(instance.instance_type_id)
+ except ValueError:
+ logging.warn("Instance %s did not have instance_type_id "
+ "converted to an integer because its value is %s" %
+ (instance.id, instance.instance_type_id))
+ types[instance.id] = None
+
+ integer_column = Column('instance_type_id_int', Integer(), nullable=True)
+ string_column = instances.c.instance_type_id
+
+ integer_column.create(instances)
+ for instance_id, instance_type_id in types.iteritems():
+ update = instances.update().\
+ where(instances.c.id == instance_id).\
+ values(instance_type_id_int=instance_type_id)
+ migrate_engine.execute(update)
+
+ string_column.alter(name='instance_type_id_str')
+ integer_column.alter(name='instance_type_id')
+ string_column.drop()
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ integer_column = instances.c.instance_type_id
+ string_column = Column('instance_type_id_str',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+ types = {}
+ for instance in migrate_engine.execute(instances.select()):
+ if instance.instance_type_id is None:
+ types[instance.id] = None
+ else:
+ types[instance.id] = str(instance.instance_type_id)
+
+ string_column.create(instances)
+ for instance_id, instance_type_id in types.iteritems():
+ update = instances.update().\
+ where(instances.c.id == instance_id).\
+ values(instance_type_id_str=instance_type_id)
+ migrate_engine.execute(update)
+
+ integer_column.alter(name='instance_type_id_int')
+ string_column.alter(name='instance_type_id')
+ integer_column.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
new file mode 100644
index 000000000..a169afb40
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+#from nova import log as logging
+
+meta = MetaData()
+
+c_manageent = Column('server_manageent_url',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+c_management = Column('server_management_url',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ tokens = Table('auth_tokens', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ tokens.create_column(c_management)
+ migrate_engine.execute(tokens.update()
+ .values(server_management_url=tokens.c.server_manageent_url))
+
+ tokens.c.server_manageent_url.drop()
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ tokens = Table('auth_tokens', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ tokens.create_column(c_manageent)
+ migrate_engine.execute(tokens.update()
+ .values(server_manageent_url=tokens.c.server_management_url))
+
+ tokens.c.server_management_url.drop()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 36a084a1d..1215448f8 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -209,7 +209,7 @@ class Instance(BASE, NovaBase):
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
- instance_type_id = Column(String(255))
+ instance_type_id = Column(Integer)
user_data = Column(Text)
@@ -313,18 +313,20 @@ class Volume(BASE, NovaBase):
class Quota(BASE, NovaBase):
- """Represents quota overrides for a project."""
+ """Represents a single quota override for a project.
+
+ If there is no row for a given project id and resource, then
+ the default for the deployment is used. If the row is present
+ but the hard limit is Null, then the resource is unlimited.
+ """
+
__tablename__ = 'quotas'
id = Column(Integer, primary_key=True)
- project_id = Column(String(255))
+ project_id = Column(String(255), index=True)
- instances = Column(Integer)
- cores = Column(Integer)
- volumes = Column(Integer)
- gigabytes = Column(Integer)
- floating_ips = Column(Integer)
- metadata_items = Column(Integer)
+ resource = Column(String(255))
+ hard_limit = Column(Integer, nullable=True)
class ExportDevice(BASE, NovaBase):
@@ -493,7 +495,7 @@ class AuthToken(BASE, NovaBase):
__tablename__ = 'auth_tokens'
token_hash = Column(String(255), primary_key=True)
user_id = Column(String(255))
- server_manageent_url = Column(String(255))
+ server_management_url = Column(String(255))
storage_url = Column(String(255))
cdn_management_url = Column(String(255))
diff --git a/nova/exception.py b/nova/exception.py
index 9905fb19b..56c20d111 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -244,6 +244,10 @@ class InstanceUnacceptable(Invalid):
message = _("Instance %(instance_id)s is unacceptable") + ": %(reason)s"
+class InvalidEc2Id(Invalid):
+ message = _("Ec2 id %(ec2_id)s is unacceptable.")
+
+
class NotFound(NovaException):
message = _("Resource could not be found.")
@@ -251,6 +255,10 @@ class NotFound(NovaException):
super(NotFound, self).__init__(**kwargs)
+class FlagNotSet(NotFound):
+ message = _("Required flag %(flag)s not set.")
+
+
class InstanceNotFound(NotFound):
message = _("Instance %(instance_id)s could not be found.")
diff --git a/nova/flags.py b/nova/flags.py
index 519793643..9eaac5596 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -110,7 +110,7 @@ class FlagValues(gflags.FlagValues):
return name in self.__dict__['__dirty']
def ClearDirty(self):
- self.__dict__['__is_dirty'] = []
+ self.__dict__['__dirty'] = []
def WasAlreadyParsed(self):
return self.__dict__['__was_already_parsed']
@@ -119,11 +119,12 @@ class FlagValues(gflags.FlagValues):
if '__stored_argv' not in self.__dict__:
return
new_flags = FlagValues(self)
- for k in self.__dict__['__dirty']:
+ for k in self.FlagDict().iterkeys():
new_flags[k] = gflags.FlagValues.__getitem__(self, k)
+ new_flags.Reset()
new_flags(self.__dict__['__stored_argv'])
- for k in self.__dict__['__dirty']:
+ for k in new_flags.FlagDict().iterkeys():
setattr(self, k, getattr(new_flags, k))
self.ClearDirty()
@@ -369,6 +370,9 @@ DEFINE_string('host', socket.gethostname(),
DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
+DEFINE_string('notification_driver',
+ 'nova.notifier.no_op_notifier',
+ 'Default driver for sending notifications')
DEFINE_list('memcached_servers', None,
'Memcached servers or None for in process cache.')
diff --git a/nova/tests/real_flags.py b/nova/ipv6/__init__.py
index 71da04992..da4567cfb 100644
--- a/nova/tests/real_flags.py
+++ b/nova/ipv6/__init__.py
@@ -1,8 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
+# Copyright (c) 2011 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -16,11 +14,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import flags
-
-FLAGS = flags.FLAGS
-
-FLAGS.connection_type = 'libvirt'
-FLAGS.fake_rabbit = False
-FLAGS.fake_network = False
-FLAGS.verbose = False
+from nova.ipv6.api import *
diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py
new file mode 100644
index 000000000..258678f0a
--- /dev/null
+++ b/nova/ipv6/account_identifier.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""IPv6 address generation with account identifier embedded"""
+
+import hashlib
+import netaddr
+
+
+def to_global(prefix, mac, project_id):
+ project_hash = netaddr.IPAddress(int(hashlib.sha1(project_id).\
+ hexdigest()[:8], 16) << 32)
+ static_num = netaddr.IPAddress(0xff << 24)
+
+ try:
+ mac_suffix = netaddr.EUI(mac).words[3:]
+ int_addr = int(''.join(['%02x' % i for i in mac_suffix]), 16)
+ mac_addr = netaddr.IPAddress(int_addr)
+ maskIP = netaddr.IPNetwork(prefix).ip
+ return (project_hash ^ static_num ^ mac_addr | maskIP).format()
+ except TypeError:
+ raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
+
+
+def to_mac(ipv6_address):
+ address = netaddr.IPAddress(ipv6_address)
+ mask1 = netaddr.IPAddress('::ff:ffff')
+ mac = netaddr.EUI(int(address & mask1)).words
+ return ':'.join(['02', '16', '3e'] + ['%02x' % i for i in mac[3:6]])
diff --git a/nova/ipv6/api.py b/nova/ipv6/api.py
new file mode 100644
index 000000000..da003645a
--- /dev/null
+++ b/nova/ipv6/api.py
@@ -0,0 +1,41 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Openstack, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import flags
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('ipv6_backend',
+ 'rfc2462',
+ 'Backend to use for IPv6 generation')
+
+
+def reset_backend():
+ global IMPL
+ IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
+ rfc2462='nova.ipv6.rfc2462',
+ account_identifier='nova.ipv6.account_identifier')
+
+
+def to_global(prefix, mac, project_id):
+ return IMPL.to_global(prefix, mac, project_id)
+
+
+def to_mac(ipv6_address):
+ return IMPL.to_mac(ipv6_address)
+
+reset_backend()
diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py
new file mode 100644
index 000000000..0074efe98
--- /dev/null
+++ b/nova/ipv6/rfc2462.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""RFC2462 style IPv6 address generation"""
+
+import netaddr
+
+
+def to_global(prefix, mac, project_id):
+ try:
+ mac64 = netaddr.EUI(mac).eui64().words
+ int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
+ mac64_addr = netaddr.IPAddress(int_addr)
+ maskIP = netaddr.IPNetwork(prefix).ip
+ return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
+ format()
+ except TypeError:
+ raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
+
+
+def to_mac(ipv6_address):
+ address = netaddr.IPAddress(ipv6_address)
+ mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff')
+ mask2 = netaddr.IPAddress('::0200:0:0:0')
+ mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words
+ return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]])
diff --git a/nova/network/api.py b/nova/network/api.py
index 1d8193b28..e2eacdf42 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -16,9 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Handles all requests relating to instances (guest vms).
-"""
+"""Handles all requests relating to instances (guest vms)."""
from nova import db
from nova import exception
@@ -28,6 +26,7 @@ from nova import quota
from nova import rpc
from nova.db import base
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.network')
@@ -37,19 +36,19 @@ class API(base.Base):
def allocate_floating_ip(self, context):
if quota.allowed_floating_ips(context, 1) < 1:
- LOG.warn(_("Quota exceeeded for %s, tried to allocate "
- "address"),
- context.project_id)
- raise quota.QuotaError(_("Address quota exceeded. You cannot "
- "allocate any more addresses"))
+            LOG.warn(_('Quota exceeded for %s, tried to allocate '
+ 'address'),
+ context.project_id)
+ raise quota.QuotaError(_('Address quota exceeded. You cannot '
+ 'allocate any more addresses'))
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
# at some point.
return rpc.call(context,
FLAGS.network_topic,
- {"method": "allocate_floating_ip",
- "args": {"project_id": context.project_id}})
+ {'method': 'allocate_floating_ip',
+ 'args': {'project_id': context.project_id}})
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
@@ -62,8 +61,8 @@ class API(base.Base):
# at some point.
rpc.cast(context,
FLAGS.network_topic,
- {"method": "deallocate_floating_ip",
- "args": {"floating_address": floating_ip['address']}})
+ {'method': 'deallocate_floating_ip',
+ 'args': {'floating_address': floating_ip['address']}})
def associate_floating_ip(self, context, floating_ip, fixed_ip,
affect_auto_assigned=False):
@@ -74,17 +73,17 @@ class API(base.Base):
return
# Check if the floating ip address is allocated
if floating_ip['project_id'] is None:
- raise exception.ApiError(_("Address (%s) is not allocated") %
+ raise exception.ApiError(_('Address (%s) is not allocated') %
floating_ip['address'])
# Check if the floating ip address is allocated to the same project
if floating_ip['project_id'] != context.project_id:
- LOG.warn(_("Address (%(address)s) is not allocated to your "
- "project (%(project)s)"),
+ LOG.warn(_('Address (%(address)s) is not allocated to your '
+ 'project (%(project)s)'),
{'address': floating_ip['address'],
'project': context.project_id})
- raise exception.ApiError(_("Address (%(address)s) is not "
- "allocated to your project"
- "(%(project)s)") %
+ raise exception.ApiError(_('Address (%(address)s) is not '
+                                     'allocated to your project '
+ '(%(project)s)') %
{'address': floating_ip['address'],
'project': context.project_id})
# NOTE(vish): Perhaps we should just pass this on to compute and
@@ -92,9 +91,9 @@ class API(base.Base):
host = fixed_ip['network']['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
- {"method": "associate_floating_ip",
- "args": {"floating_address": floating_ip['address'],
- "fixed_address": fixed_ip['address']}})
+ {'method': 'associate_floating_ip',
+ 'args': {'floating_address': floating_ip['address'],
+ 'fixed_address': fixed_ip['address']}})
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
@@ -108,5 +107,5 @@ class API(base.Base):
host = floating_ip['fixed_ip']['network']['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": floating_ip['address']}})
+ {'method': 'disassociate_floating_ip',
+ 'args': {'floating_address': floating_ip['address']}})
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index b50a4b4ea..815cd29c3 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -15,26 +15,27 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Implements vlans, bridges, and iptables rules using linux utilities.
-"""
+"""Implements vlans, bridges, and iptables rules using linux utilities."""
+
+import calendar
import inspect
import os
-import calendar
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
+from IPy import IP
+
LOG = logging.getLogger("nova.linux_net")
def _bin_file(script):
- """Return the absolute path to scipt in the bin directory"""
- return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+ """Return the absolute path to scipt in the bin directory."""
+ return os.path.abspath(os.path.join(__file__, '../../../bin', script))
FLAGS = flags.FLAGS
@@ -66,11 +67,13 @@ binary_name = os.path.basename(inspect.stack()[-1][1])
class IptablesRule(object):
- """An iptables rule
+ """An iptables rule.
You shouldn't need to use this class directly, it's only used by
- IptablesManager
+ IptablesManager.
+
"""
+
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
@@ -95,7 +98,7 @@ class IptablesRule(object):
class IptablesTable(object):
- """An iptables table"""
+ """An iptables table."""
def __init__(self):
self.rules = []
@@ -103,15 +106,16 @@ class IptablesTable(object):
self.unwrapped_chains = set()
def add_chain(self, name, wrap=True):
- """Adds a named chain to the table
+ """Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
- so if nova-compute creates a chain named "OUTPUT", it'll actually
- end up named "nova-compute-OUTPUT".
+ so if nova-compute creates a chain named 'OUTPUT', it'll actually
+ end up named 'nova-compute-OUTPUT'.
+
"""
if wrap:
self.chains.add(name)
@@ -119,12 +123,13 @@ class IptablesTable(object):
self.unwrapped_chains.add(name)
def remove_chain(self, name, wrap=True):
- """Remove named chain
+ """Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
+
"""
if wrap:
chain_set = self.chains
@@ -132,7 +137,7 @@ class IptablesTable(object):
chain_set = self.unwrapped_chains
if name not in chain_set:
- LOG.debug(_("Attempted to remove chain %s which doesn't exist"),
+ LOG.debug(_('Attempted to remove chain %s which does not exist'),
name)
return
@@ -147,17 +152,18 @@ class IptablesTable(object):
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
- """Add a rule to the table
+ """Add a rule to the table.
This is just like what you'd feed to iptables, just without
- the "-A <chain name>" bit at the start.
+ the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
+
"""
if wrap and chain not in self.chains:
- raise ValueError(_("Unknown chain: %r") % chain)
+ raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
@@ -170,23 +176,24 @@ class IptablesTable(object):
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
- """Remove a rule from a chain
+ """Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
+
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
except ValueError:
- LOG.debug(_("Tried to remove rule that wasn't there:"
- " %(chain)r %(rule)r %(wrap)r %(top)r"),
+ LOG.debug(_('Tried to remove rule that was not there:'
+ ' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
class IptablesManager(object):
- """Wrapper for iptables
+ """Wrapper for iptables.
See IptablesTable for some usage docs
@@ -205,7 +212,9 @@ class IptablesManager(object):
For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same was as the builtin filter chains. Additionally, there's
a snat chain that is applied after the POSTROUTING chain.
+
"""
+
def __init__(self, execute=None):
if not execute:
self.execute = _execute
@@ -267,11 +276,12 @@ class IptablesManager(object):
@utils.synchronized('iptables', external=True)
def apply(self):
- """Apply the current in-memory set of iptables rules
+ """Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
+
"""
s = [('iptables', self.ipv4)]
if FLAGS.use_ipv6:
@@ -348,63 +358,63 @@ class IptablesManager(object):
def metadata_forward():
- """Create forwarding rule for metadata"""
- iptables_manager.ipv4['nat'].add_rule("PREROUTING",
- "-s 0.0.0.0/0 -d 169.254.169.254/32 "
- "-p tcp -m tcp --dport 80 -j DNAT "
- "--to-destination %s:%s" % \
+ """Create forwarding rule for metadata."""
+ iptables_manager.ipv4['nat'].add_rule('PREROUTING',
+ '-s 0.0.0.0/0 -d 169.254.169.254/32 '
+ '-p tcp -m tcp --dport 80 -j DNAT '
+ '--to-destination %s:%s' % \
(FLAGS.ec2_dmz_host, FLAGS.ec2_port))
iptables_manager.apply()
def init_host():
- """Basic networking setup goes here"""
+ """Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
- iptables_manager.ipv4['nat'].add_rule("snat",
- "-s %s -j SNAT --to-source %s" % \
+ iptables_manager.ipv4['nat'].add_rule('snat',
+ '-s %s -j SNAT --to-source %s' % \
(FLAGS.fixed_range,
FLAGS.routing_source_ip))
- iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
- "-s %s -d %s -j ACCEPT" % \
+ iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
+ '-s %s -d %s -j ACCEPT' % \
(FLAGS.fixed_range, FLAGS.dmz_cidr))
- iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
- "-s %(range)s -d %(range)s "
- "-j ACCEPT" % \
+ iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
+ '-s %(range)s -d %(range)s '
+ '-j ACCEPT' % \
{'range': FLAGS.fixed_range})
iptables_manager.apply()
def bind_floating_ip(floating_ip, check_exit_code=True):
- """Bind ip to public interface"""
+ """Bind ip to public interface."""
_execute('sudo', 'ip', 'addr', 'add', floating_ip,
'dev', FLAGS.public_interface,
check_exit_code=check_exit_code)
def unbind_floating_ip(floating_ip):
- """Unbind a public ip from public interface"""
+ """Unbind a public ip from public interface."""
_execute('sudo', 'ip', 'addr', 'del', floating_ip,
'dev', FLAGS.public_interface)
def ensure_metadata_ip():
- """Sets up local metadata ip"""
+ """Sets up local metadata ip."""
_execute('sudo', 'ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo', check_exit_code=False)
def ensure_vlan_forward(public_ip, port, private_ip):
- """Sets up forwarding rules for vlan"""
- iptables_manager.ipv4['filter'].add_rule("FORWARD",
- "-d %s -p udp "
- "--dport 1194 "
- "-j ACCEPT" % private_ip)
- iptables_manager.ipv4['nat'].add_rule("PREROUTING",
- "-d %s -p udp "
- "--dport %s -j DNAT --to %s:1194" %
+ """Sets up forwarding rules for vlan."""
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '-d %s -p udp '
+ '--dport 1194 '
+ '-j ACCEPT' % private_ip)
+ iptables_manager.ipv4['nat'].add_rule('PREROUTING',
+ '-d %s -p udp '
+ '--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.ipv4['nat'].add_rule("OUTPUT",
"-d %s -p udp "
@@ -414,37 +424,38 @@ def ensure_vlan_forward(public_ip, port, private_ip):
def ensure_floating_forward(floating_ip, fixed_ip):
- """Ensure floating ip forwarding rule"""
+ """Ensure floating ip forwarding rule."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip):
- """Remove forwarding for floating ip"""
+ """Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
def floating_forward_rules(floating_ip, fixed_ip):
- return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
- ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
- ("floating-snat",
- "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))]
+ return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
+ ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
+ ('floating-snat',
+ '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
- """Create a vlan and bridge unless they already exist"""
+ """Create a vlan and bridge unless they already exist."""
interface = ensure_vlan(vlan_num)
ensure_bridge(bridge, interface, net_attrs)
+@utils.synchronized('ensure_vlan', external=True)
def ensure_vlan(vlan_num):
- """Create a vlan unless it already exists"""
- interface = "vlan%s" % vlan_num
+ """Create a vlan unless it already exists."""
+ interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
- LOG.debug(_("Starting VLAN inteface %s"), interface)
+            LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
_execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num)
_execute('sudo', 'ip', 'link', 'set', interface, 'up')
@@ -464,12 +475,13 @@ def ensure_bridge(bridge, interface, net_attrs=None):
The code will attempt to move any ips that already exist on the interface
onto the bridge and reset the default gateway if necessary.
+
"""
if not _device_exists(bridge):
- LOG.debug(_("Starting Bridge interface for %s"), interface)
+ LOG.debug(_('Starting Bridge interface for %s'), interface)
_execute('sudo', 'brctl', 'addbr', bridge)
_execute('sudo', 'brctl', 'setfd', bridge, 0)
- # _execute("sudo brctl setageing %s 10" % bridge)
+ # _execute('sudo brctl setageing %s 10' % bridge)
_execute('sudo', 'brctl', 'stp', bridge, 'off')
_execute('sudo', 'ip', 'link', 'set', bridge, 'up')
if net_attrs:
@@ -477,15 +489,15 @@ def ensure_bridge(bridge, interface, net_attrs=None):
# bridge for it to respond to reqests properly
suffix = net_attrs['cidr'].rpartition('/')[2]
out, err = _execute('sudo', 'ip', 'addr', 'add',
- "%s/%s" %
+ '%s/%s' %
(net_attrs['gateway'], suffix),
'brd',
net_attrs['broadcast'],
'dev',
bridge,
check_exit_code=False)
- if err and err != "RTNETLINK answers: File exists\n":
- raise exception.Error("Failed to add ip: %s" % err)
+ if err and err != 'RTNETLINK answers: File exists\n':
+ raise exception.Error('Failed to add ip: %s' % err)
if(FLAGS.use_ipv6):
_execute('sudo', 'ip', '-f', 'inet6', 'addr',
'change', net_attrs['cidr_v6'],
@@ -501,17 +513,17 @@ def ensure_bridge(bridge, interface, net_attrs=None):
# interface, so we move any ips to the bridge
gateway = None
out, err = _execute('sudo', 'route', '-n')
- for line in out.split("\n"):
+ for line in out.split('\n'):
fields = line.split()
- if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
+ if fields and fields[0] == '0.0.0.0' and fields[-1] == interface:
gateway = fields[1]
_execute('sudo', 'route', 'del', 'default', 'gw', gateway,
'dev', interface, check_exit_code=False)
out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface,
'scope', 'global')
- for line in out.split("\n"):
+ for line in out.split('\n'):
fields = line.split()
- if fields and fields[0] == "inet":
+ if fields and fields[0] == 'inet':
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]))
_execute(*_ip_bridge_cmd('add', params, bridge))
@@ -522,18 +534,18 @@ def ensure_bridge(bridge, interface, net_attrs=None):
if (err and err != "device %s is already a member of a bridge; can't "
"enslave it to bridge %s.\n" % (interface, bridge)):
- raise exception.Error("Failed to add interface: %s" % err)
+ raise exception.Error('Failed to add interface: %s' % err)
- iptables_manager.ipv4['filter'].add_rule("FORWARD",
- "--in-interface %s -j ACCEPT" % \
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--in-interface %s -j ACCEPT' % \
bridge)
- iptables_manager.ipv4['filter'].add_rule("FORWARD",
- "--out-interface %s -j ACCEPT" % \
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--out-interface %s -j ACCEPT' % \
bridge)
def get_dhcp_leases(context, network_id):
- """Return a network's hosts config in dnsmasq leasefile format"""
+ """Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
network_id):
@@ -542,7 +554,7 @@ def get_dhcp_leases(context, network_id):
def get_dhcp_hosts(context, network_id):
- """Get a string containing a network's hosts config in dhcp-host format"""
+ """Get network's hosts config in dhcp-host format."""
hosts = []
for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
network_id):
@@ -555,10 +567,11 @@ def get_dhcp_hosts(context, network_id):
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def update_dhcp(context, network_id):
- """(Re)starts a dnsmasq server for a given network
+ """(Re)starts a dnsmasq server for a given network.
+
+ If a dnsmasq instance is already running then send a HUP
+ signal causing it to reload, otherwise spawn a new instance.
- if a dnsmasq instance is already running then send a HUP
- signal causing it to reload, otherwise spawn a new instance
"""
network_ref = db.network_get(context, network_id)
@@ -573,16 +586,16 @@ def update_dhcp(context, network_id):
# if dnsmasq is already running, then tell it to reload
if pid:
- out, _err = _execute('cat', "/proc/%d/cmdline" % pid,
+ out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
if conffile in out:
try:
_execute('sudo', 'kill', '-HUP', pid)
return
except Exception as exc: # pylint: disable=W0703
- LOG.debug(_("Hupping dnsmasq threw %s"), exc)
+ LOG.debug(_('Hupping dnsmasq threw %s'), exc)
else:
- LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
+ LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -625,18 +638,18 @@ interface %s
try:
_execute('sudo', 'kill', pid)
except Exception as exc: # pylint: disable=W0703
- LOG.debug(_("killing radvd threw %s"), exc)
+ LOG.debug(_('killing radvd threw %s'), exc)
else:
- LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
+ LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
command = _ra_cmd(network_ref)
_execute(*command)
db.network_update(context, network_id,
- {"gateway_v6":
+ {'gateway_v6':
utils.get_my_linklocal(network_ref['bridge'])})
def _host_lease(fixed_ip_ref):
- """Return a host string for an address in leasefile format"""
+ """Return a host string for an address in leasefile format."""
instance_ref = fixed_ip_ref['instance']
if instance_ref['updated_at']:
timestamp = instance_ref['updated_at']
@@ -645,39 +658,39 @@ def _host_lease(fixed_ip_ref):
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
- return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time,
+ return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
instance_ref['mac_address'],
fixed_ip_ref['address'],
instance_ref['hostname'] or '*')
def _host_dhcp(fixed_ip_ref):
- """Return a host string for an address in dhcp-host format"""
+ """Return a host string for an address in dhcp-host format."""
instance_ref = fixed_ip_ref['instance']
- return "%s,%s.%s,%s" % (instance_ref['mac_address'],
+ return '%s,%s.%s,%s' % (instance_ref['mac_address'],
instance_ref['hostname'],
FLAGS.dhcp_domain,
fixed_ip_ref['address'])
def _execute(*cmd, **kwargs):
- """Wrapper around utils._execute for fake_network"""
+ """Wrapper around utils._execute for fake_network."""
if FLAGS.fake_network:
- LOG.debug("FAKE NET: %s", " ".join(map(str, cmd)))
- return "fake", 0
+ LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
+ return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def _device_exists(device):
- """Check if ethernet device exists"""
+ """Check if ethernet device exists."""
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False)
return not err
def _dnsmasq_cmd(net):
- """Builds dnsmasq command"""
+ """Builds dnsmasq command."""
cmd = ['sudo', '-E', 'dnsmasq',
'--strict-order',
'--bind-interfaces',
@@ -687,6 +700,7 @@ def _dnsmasq_cmd(net):
'--listen-address=%s' % net['gateway'],
'--except-interface=lo',
'--dhcp-range=%s,static,120s' % net['dhcp_start'],
+ '--dhcp-lease-max=%s' % IP(net['cidr']).len(),
'--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
@@ -696,7 +710,7 @@ def _dnsmasq_cmd(net):
def _ra_cmd(net):
- """Builds radvd command"""
+ """Builds radvd command."""
cmd = ['sudo', '-E', 'radvd',
# '-u', 'nobody',
'-C', '%s' % _ra_file(net['bridge'], 'conf'),
@@ -705,44 +719,43 @@ def _ra_cmd(net):
def _stop_dnsmasq(network):
- """Stops the dnsmasq instance for a given network"""
+ """Stops the dnsmasq instance for a given network."""
pid = _dnsmasq_pid_for(network)
if pid:
try:
_execute('sudo', 'kill', '-TERM', pid)
except Exception as exc: # pylint: disable=W0703
- LOG.debug(_("Killing dnsmasq threw %s"), exc)
+ LOG.debug(_('Killing dnsmasq threw %s'), exc)
def _dhcp_file(bridge, kind):
- """Return path to a pid, leases or conf file for a bridge"""
-
+ """Return path to a pid, leases or conf file for a bridge."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
- return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path,
+ return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
bridge,
kind))
def _ra_file(bridge, kind):
- """Return path to a pid or conf file for a bridge"""
+ """Return path to a pid or conf file for a bridge."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
- return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path,
+ return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
bridge,
kind))
def _dnsmasq_pid_for(bridge):
- """Returns the pid for prior dnsmasq instance for a bridge
+ """Returns the pid for prior dnsmasq instance for a bridge.
- Returns None if no pid file exists
+ Returns None if no pid file exists.
- If machine has rebooted pid might be incorrect (caller should check)
- """
+ If machine has rebooted pid might be incorrect (caller should check).
+ """
pid_file = _dhcp_file(bridge, 'pid')
if os.path.exists(pid_file):
@@ -751,13 +764,13 @@ def _dnsmasq_pid_for(bridge):
def _ra_pid_for(bridge):
- """Returns the pid for prior radvd instance for a bridge
+ """Returns the pid for prior radvd instance for a bridge.
- Returns None if no pid file exists
+ Returns None if no pid file exists.
- If machine has rebooted pid might be incorrect (caller should check)
- """
+ If machine has rebooted pid might be incorrect (caller should check).
+ """
pid_file = _ra_file(bridge, 'pid')
if os.path.exists(pid_file):
@@ -766,8 +779,7 @@ def _ra_pid_for(bridge):
def _ip_bridge_cmd(action, params, device):
- """Build commands to add/del ips to bridges/devices"""
-
+ """Build commands to add/del ips to bridges/devices."""
cmd = ['sudo', 'ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 0dd7f2360..5a6fdde5a 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -16,8 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Network Hosts are responsible for allocating ips and setting up network.
+"""Network Hosts are responsible for allocating ips and setting up network.
There are multiple backend drivers that handle specific types of networking
topologies. All of the network commands are issued to a subclass of
@@ -61,6 +60,8 @@ from nova import rpc
LOG = logging.getLogger("nova.network.manager")
+
+
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
@@ -111,7 +112,9 @@ class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
+
"""
+
timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
@@ -122,9 +125,7 @@ class NetworkManager(manager.SchedulerDependentManager):
*args, **kwargs)
def init_host(self):
- """Do any initialization that needs to be run if this is a
- standalone service.
- """
+ """Do any initialization for a standalone service."""
self.driver.init_host()
self.driver.ensure_metadata_ip()
# Set up networking for the projects for which we're already
@@ -154,11 +155,11 @@ class NetworkManager(manager.SchedulerDependentManager):
self.host,
time)
if num:
- LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
+            LOG.debug(_('Disassociated %s stale fixed ip(s)'), num)
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
- LOG.debug(_("setting network host"), context=context)
+ LOG.debug(_('setting network host'), context=context)
host = self.db.network_set_host(context,
network_id,
self.host)
@@ -224,39 +225,39 @@ class NetworkManager(manager.SchedulerDependentManager):
def lease_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is leased."""
- LOG.debug(_("Leasing IP %s"), address, context=context)
+ LOG.debug(_('Leasing IP %s'), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
- raise exception.Error(_("IP %s leased that isn't associated") %
+ raise exception.Error(_('IP %s leased that is not associated') %
address)
if instance_ref['mac_address'] != mac:
inst_addr = instance_ref['mac_address']
- raise exception.Error(_("IP %(address)s leased to bad"
- " mac %(inst_addr)s vs %(mac)s") % locals())
+ raise exception.Error(_('IP %(address)s leased to bad mac'
+ ' %(inst_addr)s vs %(mac)s') % locals())
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
- LOG.warn(_("IP %s leased that was already deallocated"), address,
+ LOG.warn(_('IP %s leased that was already deallocated'), address,
context=context)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
- LOG.debug(_("Releasing IP %s"), address, context=context)
+ LOG.debug(_('Releasing IP %s'), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
- raise exception.Error(_("IP %s released that isn't associated") %
+ raise exception.Error(_('IP %s released that is not associated') %
address)
if instance_ref['mac_address'] != mac:
inst_addr = instance_ref['mac_address']
- raise exception.Error(_("IP %(address)s released from"
- " bad mac %(inst_addr)s vs %(mac)s") % locals())
+ raise exception.Error(_('IP %(address)s released from bad mac'
+ ' %(inst_addr)s vs %(mac)s') % locals())
if not fixed_ip_ref['leased']:
- LOG.warn(_("IP %s released that was not leased"), address,
+ LOG.warn(_('IP %s released that was not leased'), address,
context=context)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
@@ -286,8 +287,8 @@ class NetworkManager(manager.SchedulerDependentManager):
return self.set_network_host(context, network_ref['id'])
host = rpc.call(context,
FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
+ {'method': 'set_network_host',
+ 'args': {'network_id': network_ref['id']}})
return host
def create_networks(self, context, cidr, num_networks, network_size,
@@ -302,7 +303,7 @@ class NetworkManager(manager.SchedulerDependentManager):
start = index * network_size
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
- cidr = "%s/%s" % (fixed_net[start], significant_bits)
+ cidr = '%s/%s' % (fixed_net[start], significant_bits)
project_net = IPy.IP(cidr)
net = {}
net['bridge'] = FLAGS.flat_network_bridge
@@ -313,13 +314,13 @@ class NetworkManager(manager.SchedulerDependentManager):
net['broadcast'] = str(project_net.broadcast())
net['dhcp_start'] = str(project_net[2])
if num_networks > 1:
- net['label'] = "%s_%d" % (label, count)
+ net['label'] = '%s_%d' % (label, count)
else:
net['label'] = label
count += 1
if(FLAGS.use_ipv6):
- cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
+ cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
project_net_v6 = IPy.IP(cidr_v6)
@@ -386,13 +387,13 @@ class FlatManager(NetworkManager):
Metadata forwarding must be handled by the gateway, and since nova does
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
+
"""
+
timeout_fixed_ips = False
def init_host(self):
- """Do any initialization that needs to be run if this is a
- standalone service.
- """
+ """Do any initialization for a standalone service."""
#Fix for bug 723298 - do not call init_host on superclass
#Following code has been copied for NetworkManager.init_host
ctxt = context.get_admin_context()
@@ -433,12 +434,11 @@ class FlatDHCPManager(NetworkManager):
FlatDHCPManager will start up one dhcp server to give out addresses.
It never injects network settings into the guest. Otherwise it behaves
like FlatDHCPManager.
+
"""
def init_host(self):
- """Do any initialization that needs to be run if this is a
- standalone service.
- """
+ """Do any initialization for a standalone service."""
super(FlatDHCPManager, self).init_host()
self.driver.metadata_forward()
@@ -490,12 +490,11 @@ class VlanManager(NetworkManager):
A dhcp server is run for each subnet, so each project will have its own.
For this mode to be useful, each project will need a vpn to access the
instances in its subnet.
+
"""
def init_host(self):
- """Do any initialization that needs to be run if this is a
- standalone service.
- """
+ """Do any initialization for a standalone service."""
super(VlanManager, self).init_host()
self.driver.metadata_forward()
@@ -566,7 +565,7 @@ class VlanManager(NetworkManager):
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
if(FLAGS.use_ipv6):
- cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
+ cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
@@ -600,8 +599,8 @@ class VlanManager(NetworkManager):
return self.set_network_host(context, network_ref['id'])
host = rpc.call(context,
FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
+ {'method': 'set_network_host',
+ 'args': {'network_id': network_ref['id']}})
return host
diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py
index 9b2db7b8f..373060add 100644
--- a/nova/network/vmwareapi_net.py
+++ b/nova/network/vmwareapi_net.py
@@ -15,9 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Implements vlans for vmwareapi.
-"""
+"""Implements vlans for vmwareapi."""
from nova import db
from nova import exception
@@ -27,8 +25,10 @@ from nova import utils
from nova.virt.vmwareapi_conn import VMWareAPISession
from nova.virt.vmwareapi import network_utils
+
LOG = logging.getLogger("nova.network.vmwareapi_net")
+
FLAGS = flags.FLAGS
flags.DEFINE_string('vlan_interface', 'vmnic0',
'Physical network adapter name in VMware ESX host for '
@@ -42,10 +42,10 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
host_username = FLAGS.vmwareapi_host_username
host_password = FLAGS.vmwareapi_host_password
if not host_ip or host_username is None or host_password is None:
- raise Exception(_("Must specify vmwareapi_host_ip,"
- "vmwareapi_host_username "
- "and vmwareapi_host_password to use"
- "connection_type=vmwareapi"))
+ raise Exception(_('Must specify vmwareapi_host_ip, '
+ 'vmwareapi_host_username '
+ 'and vmwareapi_host_password to use '
+ 'connection_type=vmwareapi'))
session = VMWareAPISession(host_ip, host_username, host_password,
FLAGS.vmwareapi_api_retry_count)
vlan_interface = FLAGS.vlan_interface
diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py
index 8c22a7d4b..709ef7f34 100644
--- a/nova/network/xenapi_net.py
+++ b/nova/network/xenapi_net.py
@@ -15,9 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Implements vlans, bridges, and iptables rules using linux utilities.
-"""
+"""Implements vlans, bridges, and iptables rules using linux utilities."""
import os
@@ -26,22 +24,24 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
-from nova.virt.xenapi_conn import XenAPISession
+from nova.virt import xenapi_conn
from nova.virt.xenapi import network_utils
+
LOG = logging.getLogger("nova.xenapi_net")
+
FLAGS = flags.FLAGS
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
# Open xenapi session
- LOG.debug("ENTERING ensure_vlan_bridge in xenapi net")
+ LOG.debug('ENTERING ensure_vlan_bridge in xenapi net')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
- session = XenAPISession(url, username, password)
+ session = xenapi_conn.XenAPISession(url, username, password)
# Check whether bridge already exists
# Retrieve network whose name_label is "bridge"
network_ref = network_utils.NetworkHelper.find_network_with_name_label(
@@ -50,14 +50,14 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
if network_ref is None:
# If bridge does not exists
# 1 - create network
- description = "network for nova bridge %s" % bridge
+ description = 'network for nova bridge %s' % bridge
network_rec = {'name_label': bridge,
'name_description': description,
'other_config': {}}
network_ref = session.call_xenapi('network.create', network_rec)
# 2 - find PIF for VLAN
- expr = 'field "device" = "%s" and \
- field "VLAN" = "-1"' % FLAGS.vlan_interface
+ expr = "field 'device' = '%s' and \
+ field 'VLAN' = '-1'" % FLAGS.vlan_interface
pifs = session.call_xenapi('PIF.get_all_records_where', expr)
pif_ref = None
# Multiple PIF are ok: we are dealing with a pool
diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py
new file mode 100644
index 000000000..482d54e4f
--- /dev/null
+++ b/nova/notifier/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
new file mode 100644
index 000000000..a3e7a039e
--- /dev/null
+++ b/nova/notifier/api.py
@@ -0,0 +1,83 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from nova import flags
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('default_notification_level', 'INFO',
+ 'Default notification level for outgoing notifications')
+
+WARN = 'WARN'
+INFO = 'INFO'
+ERROR = 'ERROR'
+CRITICAL = 'CRITICAL'
+DEBUG = 'DEBUG'
+
+log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
+
+
+class BadPriorityException(Exception):
+ pass
+
+
+def notify(publisher_id, event_type, priority, payload):
+ """
+ Sends a notification using the specified driver
+
+ Notify parameters:
+
+ publisher_id - the source worker_type.host of the message
+ event_type - the literal type of event (ex. Instance Creation)
+ priority - patterned after the enumeration of Python logging levels in
+ the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
+ payload - A python dictionary of attributes
+
+ Outgoing message format includes the above parameters, and appends the
+ following:
+
+ message_id - a UUID representing the id for this notification
+ timestamp - the GMT timestamp the notification was sent at
+
+ The composite message will be constructed as a dictionary of the above
+ attributes, which will then be sent via the transport mechanism defined
+ by the driver.
+
+ Message example:
+
+ {'message_id': str(uuid.uuid4()),
+ 'publisher_id': 'compute.host1',
+ 'timestamp': datetime.datetime.utcnow(),
+ 'priority': 'WARN',
+ 'event_type': 'compute.create_instance',
+ 'payload': {'instance_id': 12, ... }}
+
+ """
+ if priority not in log_levels:
+ raise BadPriorityException(
+ _('%s not in valid priorities' % priority))
+ driver = utils.import_object(FLAGS.notification_driver)
+ msg = dict(message_id=str(uuid.uuid4()),
+ publisher_id=publisher_id,
+ event_type=event_type,
+ priority=priority,
+ payload=payload,
+ timestamp=str(datetime.datetime.utcnow()))
+ driver.notify(msg)
diff --git a/nova/notifier/log_notifier.py b/nova/notifier/log_notifier.py
new file mode 100644
index 000000000..25dfc693b
--- /dev/null
+++ b/nova/notifier/log_notifier.py
@@ -0,0 +1,34 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from nova import flags
+from nova import log as logging
+
+
+FLAGS = flags.FLAGS
+
+
+def notify(message):
+ """Notifies the recipient of the desired event given the model.
+ Log notifications using nova's default logging system"""
+
+ priority = message.get('priority',
+ FLAGS.default_notification_level)
+ priority = priority.lower()
+ logger = logging.getLogger(
+ 'nova.notification.%s' % message['event_type'])
+ getattr(logger, priority)(json.dumps(message))
diff --git a/nova/notifier/no_op_notifier.py b/nova/notifier/no_op_notifier.py
new file mode 100644
index 000000000..029710505
--- /dev/null
+++ b/nova/notifier/no_op_notifier.py
@@ -0,0 +1,19 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def notify(message):
+ """Notifies the recipient of the desired event given the model"""
+ pass
diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py
new file mode 100644
index 000000000..d46670b58
--- /dev/null
+++ b/nova/notifier/rabbit_notifier.py
@@ -0,0 +1,36 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import nova.context
+
+from nova import flags
+from nova import rpc
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('notification_topic', 'notifications',
+ 'RabbitMQ topic used for Nova notifications')
+
+
+def notify(message):
+ """Sends a notification to the RabbitMQ"""
+ context = nova.context.get_admin_context()
+ priority = message.get('priority',
+ FLAGS.default_notification_level)
+ priority = priority.lower()
+ topic = '%s.%s' % (FLAGS.notification_topic, priority)
+ rpc.cast(context, topic, message)
diff --git a/nova/quota.py b/nova/quota.py
index d8b5d9a93..58766e846 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -28,6 +28,8 @@ flags.DEFINE_integer('quota_instances', 10,
'number of instances allowed per project')
flags.DEFINE_integer('quota_cores', 20,
'number of instance cores allowed per project')
+flags.DEFINE_integer('quota_ram', 50 * 1024,
+ 'megabytes of instance ram allowed per project')
flags.DEFINE_integer('quota_volumes', 10,
'number of volumes allowed per project')
flags.DEFINE_integer('quota_gigabytes', 1000,
@@ -44,82 +46,116 @@ flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255,
'number of bytes allowed per injected file path')
-def get_quota(context, project_id):
- rval = {'instances': FLAGS.quota_instances,
- 'cores': FLAGS.quota_cores,
- 'volumes': FLAGS.quota_volumes,
- 'gigabytes': FLAGS.quota_gigabytes,
- 'floating_ips': FLAGS.quota_floating_ips,
- 'metadata_items': FLAGS.quota_metadata_items}
-
- try:
- quota = db.quota_get(context, project_id)
- for key in rval.keys():
- if quota[key] is not None:
- rval[key] = quota[key]
- except exception.NotFound:
- pass
+def _get_default_quotas():
+ defaults = {
+ 'instances': FLAGS.quota_instances,
+ 'cores': FLAGS.quota_cores,
+ 'ram': FLAGS.quota_ram,
+ 'volumes': FLAGS.quota_volumes,
+ 'gigabytes': FLAGS.quota_gigabytes,
+ 'floating_ips': FLAGS.quota_floating_ips,
+ 'metadata_items': FLAGS.quota_metadata_items,
+ 'injected_files': FLAGS.quota_max_injected_files,
+ 'injected_file_content_bytes':
+ FLAGS.quota_max_injected_file_content_bytes,
+ }
+ # -1 in the quota flags means unlimited
+ for key in defaults.keys():
+ if defaults[key] == -1:
+ defaults[key] = None
+ return defaults
+
+
+def get_project_quotas(context, project_id):
+ rval = _get_default_quotas()
+ quota = db.quota_get_all_by_project(context, project_id)
+ for key in rval.keys():
+ if key in quota:
+ rval[key] = quota[key]
return rval
-def allowed_instances(context, num_instances, instance_type):
- """Check quota and return min(num_instances, allowed_instances)."""
+def _get_request_allotment(requested, used, quota):
+ if quota is None:
+ return requested
+ return quota - used
+
+
+def allowed_instances(context, requested_instances, instance_type):
+ """Check quota and return min(requested_instances, allowed_instances)."""
project_id = context.project_id
context = context.elevated()
- used_instances, used_cores = db.instance_data_get_for_project(context,
- project_id)
- quota = get_quota(context, project_id)
- allowed_instances = quota['instances'] - used_instances
- allowed_cores = quota['cores'] - used_cores
- num_cores = num_instances * instance_type['vcpus']
+ requested_cores = requested_instances * instance_type['vcpus']
+ requested_ram = requested_instances * instance_type['memory_mb']
+ usage = db.instance_data_get_for_project(context, project_id)
+ used_instances, used_cores, used_ram = usage
+ quota = get_project_quotas(context, project_id)
+ allowed_instances = _get_request_allotment(requested_instances,
+ used_instances,
+ quota['instances'])
+ allowed_cores = _get_request_allotment(requested_cores, used_cores,
+ quota['cores'])
+ allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])
allowed_instances = min(allowed_instances,
- int(allowed_cores // instance_type['vcpus']))
- return min(num_instances, allowed_instances)
+ allowed_cores // instance_type['vcpus'],
+ allowed_ram // instance_type['memory_mb'])
+ return min(requested_instances, allowed_instances)
-def allowed_volumes(context, num_volumes, size):
- """Check quota and return min(num_volumes, allowed_volumes)."""
+def allowed_volumes(context, requested_volumes, size):
+ """Check quota and return min(requested_volumes, allowed_volumes)."""
project_id = context.project_id
context = context.elevated()
+ size = int(size)
+ requested_gigabytes = requested_volumes * size
used_volumes, used_gigabytes = db.volume_data_get_for_project(context,
project_id)
- quota = get_quota(context, project_id)
- allowed_volumes = quota['volumes'] - used_volumes
- allowed_gigabytes = quota['gigabytes'] - used_gigabytes
- size = int(size)
- num_gigabytes = num_volumes * size
+ quota = get_project_quotas(context, project_id)
+ allowed_volumes = _get_request_allotment(requested_volumes, used_volumes,
+ quota['volumes'])
+ allowed_gigabytes = _get_request_allotment(requested_gigabytes,
+ used_gigabytes,
+ quota['gigabytes'])
allowed_volumes = min(allowed_volumes,
int(allowed_gigabytes // size))
- return min(num_volumes, allowed_volumes)
+ return min(requested_volumes, allowed_volumes)
-def allowed_floating_ips(context, num_floating_ips):
- """Check quota and return min(num_floating_ips, allowed_floating_ips)."""
+def allowed_floating_ips(context, requested_floating_ips):
+ """Check quota and return min(requested, allowed) floating ips."""
project_id = context.project_id
context = context.elevated()
used_floating_ips = db.floating_ip_count_by_project(context, project_id)
- quota = get_quota(context, project_id)
- allowed_floating_ips = quota['floating_ips'] - used_floating_ips
- return min(num_floating_ips, allowed_floating_ips)
+ quota = get_project_quotas(context, project_id)
+ allowed_floating_ips = _get_request_allotment(requested_floating_ips,
+ used_floating_ips,
+ quota['floating_ips'])
+ return min(requested_floating_ips, allowed_floating_ips)
-def allowed_metadata_items(context, num_metadata_items):
- """Check quota; return min(num_metadata_items,allowed_metadata_items)."""
- project_id = context.project_id
- context = context.elevated()
- quota = get_quota(context, project_id)
- num_allowed_metadata_items = quota['metadata_items']
- return min(num_metadata_items, num_allowed_metadata_items)
+def _calculate_simple_quota(context, resource, requested):
+ """Check quota for resource; return min(requested, allowed)."""
+ quota = get_project_quotas(context, context.project_id)
+ allowed = _get_request_allotment(requested, 0, quota[resource])
+ return min(requested, allowed)
+
+
+def allowed_metadata_items(context, requested_metadata_items):
+ """Return the number of metadata items allowed."""
+ return _calculate_simple_quota(context, 'metadata_items',
+ requested_metadata_items)
-def allowed_injected_files(context):
+def allowed_injected_files(context, requested_injected_files):
"""Return the number of injected files allowed."""
- return FLAGS.quota_max_injected_files
+ return _calculate_simple_quota(context, 'injected_files',
+ requested_injected_files)
-def allowed_injected_file_content_bytes(context):
+def allowed_injected_file_content_bytes(context, requested_bytes):
"""Return the number of bytes allowed per injected file content."""
- return FLAGS.quota_max_injected_file_content_bytes
+ resource = 'injected_file_content_bytes'
+ return _calculate_simple_quota(context, resource, requested_bytes)
def allowed_injected_file_path_bytes(context):
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 816ae5513..55f8e0a6d 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -81,6 +81,12 @@ def get_zone_capabilities(context):
return _call_scheduler('get_zone_capabilities', context=context)
+def select(context, specs=None):
+ """Returns a list of hosts."""
+ return _call_scheduler('select', context=context,
+ params={"specs": specs})
+
+
def update_service_capabilities(context, service_name, host, capabilities):
"""Send an update to all the scheduler services informing them
of the capabilities of this service."""
@@ -105,6 +111,45 @@ def _process(func, zone):
return func(nova, zone)
+def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
+ """Returns a list of (zone, call_result) objects."""
+ if not isinstance(errors_to_ignore, (list, tuple)):
+ # This will also handle the default None
+ errors_to_ignore = [errors_to_ignore]
+
+ pool = greenpool.GreenPool()
+ results = []
+ for zone in db.zone_get_all(context):
+ try:
+ nova = novaclient.OpenStack(zone.username, zone.password,
+ zone.api_url)
+ nova.authenticate()
+ except novaclient.exceptions.BadRequest, e:
+ url = zone.api_url
+ LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s")
+ % locals())
+ #TODO (dabo) - add logic for failure counts per zone,
+ # with escalation after a given number of failures.
+ continue
+ zone_method = getattr(nova.zones, method)
+
+ def _error_trap(*args, **kwargs):
+ try:
+ return zone_method(*args, **kwargs)
+ except Exception as e:
+ if type(e) in errors_to_ignore:
+ return None
+ # TODO (dabo) - want to be able to re-raise here.
+ # Returning a string now; raising was causing issues.
+ # raise e
+ return "ERROR", "%s" % e
+
+ res = pool.spawn(_error_trap, *args, **kwargs)
+ results.append((zone, res))
+ pool.waitall()
+ return [(zone.id, res.wait()) for zone, res in results]
+
+
def child_zone_helper(zone_list, func):
"""Fire off a command to each zone in the list.
The return is [novaclient return objects] from each child zone.
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index ed8e65c77..483f3225c 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -96,8 +96,8 @@ class FlavorFilter(HostFilter):
selected_hosts = []
for host, services in zone_manager.service_states.iteritems():
capabilities = services.get('compute', {})
- host_ram_mb = capabilities['host_memory']['free']
- disk_bytes = capabilities['disk']['available']
+ host_ram_mb = capabilities['host_memory_free']
+ disk_bytes = capabilities['disk_available']
if host_ram_mb >= instance_type['memory_mb'] and \
disk_bytes >= instance_type['local_gb']:
selected_hosts.append((host, capabilities))
@@ -106,16 +106,16 @@ class FlavorFilter(HostFilter):
#host entries (currently) are like:
# {'host_name-description': 'Default install of XenServer',
# 'host_hostname': 'xs-mini',
-# 'host_memory': {'total': 8244539392,
-# 'overhead': 184225792,
-# 'free': 3868327936,
-# 'free-computed': 3840843776},
+# 'host_memory_total': 8244539392,
+# 'host_memory_overhead': 184225792,
+# 'host_memory_free': 3868327936,
+# 'host_memory_free_computed': 3840843776},
# 'host_other-config': {},
# 'host_ip_address': '192.168.1.109',
# 'host_cpu_info': {},
-# 'disk': {'available': 32954957824,
-# 'total': 50394562560,
-# 'used': 17439604736},
+# 'disk_available': 32954957824,
+# 'disk_total': 50394562560,
+# 'disk_used': 17439604736},
# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
# 'host_name-label': 'xs-mini'}
@@ -221,8 +221,8 @@ class JsonFilter(HostFilter):
required_ram = instance_type['memory_mb']
required_disk = instance_type['local_gb']
query = ['and',
- ['>=', '$compute.host_memory.free', required_ram],
- ['>=', '$compute.disk.available', required_disk]
+ ['>=', '$compute.host_memory_free', required_ram],
+ ['>=', '$compute.disk_available', required_disk]
]
return (self._full_name(), json.dumps(query))
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
new file mode 100644
index 000000000..b3d230bd2
--- /dev/null
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The Zone Aware Scheduler is a base class Scheduler for creating instances
+across zones. There are two expansion points to this class for:
+1. Assigning Weights to hosts for requested instances
+2. Filtering Hosts based on required instance capabilities
+"""
+
+import operator
+
+from nova import log as logging
+from nova.scheduler import api
+from nova.scheduler import driver
+
+LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')
+
+
+class ZoneAwareScheduler(driver.Scheduler):
+ """Base class for creating Zone Aware Schedulers."""
+
+ def _call_zone_method(self, context, method, specs):
+ """Call novaclient zone method. Broken out for testing."""
+ return api.call_zone_method(context, method, specs=specs)
+
+ def schedule_run_instance(self, context, topic='compute', specs={},
+ *args, **kwargs):
+ """This method is called from nova.compute.api to provision
+ an instance. However we need to look at the parameters being
+ passed in to see if this is a request to:
+ 1. Create a Build Plan and then provision, or
+ 2. Use the Build Plan information in the request parameters
+ to simply create the instance (either in this zone or
+ a child zone)."""
+
+ if 'blob' in specs:
+ return self.provision_instance(context, topic, specs)
+
+ # Create build plan and provision ...
+ build_plan = self.select(context, specs)
+ for item in build_plan:
+ self.provision_instance(context, topic, item)
+
+ def provision_instance(self, context, topic, item):
+ """Create the requested instance in this Zone or a child zone."""
+ pass
+
+ def select(self, context, *args, **kwargs):
+ """Select returns a list of weights and zone/host information
+ corresponding to the best hosts to service the request. Any
+ child zone information has been encrypted so as not to reveal
+ anything about the children."""
+ return self._schedule(context, "compute", *args, **kwargs)
+
+ def schedule(self, context, topic, *args, **kwargs):
+ """The schedule() contract requires we return the one
+ best-suited host for this request.
+ """
+ res = self._schedule(context, topic, *args, **kwargs)
+ # TODO(sirp): should this be a host object rather than a weight-dict?
+ if not res:
+ raise driver.NoValidHost(_('No hosts were available'))
+ return res[0]
+
+ def _schedule(self, context, topic, *args, **kwargs):
+ """Returns a list of hosts that meet the required specs,
+ ordered by their fitness.
+ """
+
+ #TODO(sandy): extract these from args.
+ num_instances = 1
+ specs = {}
+
+ # Filter local hosts based on requirements ...
+ host_list = self.filter_hosts(num_instances, specs)
+
+ # then weigh the selected hosts.
+ # weighted = [{weight=weight, name=hostname}, ...]
+ weighted = self.weigh_hosts(num_instances, specs, host_list)
+
+ # Next, tack on the best weights from the child zones ...
+ child_results = self._call_zone_method(context, "select",
+ specs=specs)
+ for child_zone, result in child_results:
+ for weighting in result:
+ # Remember the child_zone so we can get back to
+ # it later if needed. This implicitly builds a zone
+ # path structure.
+ host_dict = {
+ "weight": weighting["weight"],
+ "child_zone": child_zone,
+ "child_blob": weighting["blob"]}
+ weighted.append(host_dict)
+
+ weighted.sort(key=operator.itemgetter('weight'))
+ return weighted
+
+ def filter_hosts(self, num, specs):
+ """Derived classes must override this method and return
+ a list of hosts in [(hostname, capability_dict)] format."""
+ raise NotImplementedError()
+
+ def weigh_hosts(self, num, specs, hosts):
+ """Derived classes must override this method and return
+ a lists of hosts in [{weight, hostname}] format."""
+ raise NotImplementedError()
diff --git a/nova/service.py b/nova/service.py
index 2532b9df2..ab1238c3b 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -240,6 +240,10 @@ class WsgiService(object):
def wait(self):
self.wsgi_app.wait()
+ def get_socket_info(self, api_name):
+ """Returns the (host, port) that an API was started on."""
+ return self.wsgi_app.socket_info[api_name]
+
class ApiService(WsgiService):
"""Class for our nova-api service."""
@@ -318,8 +322,10 @@ def _run_wsgi(paste_config_file, apis):
logging.debug(_('App Config: %(api)s\n%(config)r') % locals())
logging.info(_('Running %s API'), api)
app = wsgi.load_paste_app(paste_config_file, api)
- apps.append((app, getattr(FLAGS, '%s_listen_port' % api),
- getattr(FLAGS, '%s_listen' % api)))
+ apps.append((app,
+ getattr(FLAGS, '%s_listen_port' % api),
+ getattr(FLAGS, '%s_listen' % api),
+ api))
if len(apps) == 0:
logging.error(_('No known API applications configured in %s.'),
paste_config_file)
diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py
index 0860b51ac..dbdd0928a 100644
--- a/nova/tests/api/openstack/extensions/foxinsocks.py
+++ b/nova/tests/api/openstack/extensions/foxinsocks.py
@@ -63,31 +63,33 @@ class Foxinsocks(object):
self._delete_tweedle))
return actions
- def get_response_extensions(self):
- response_exts = []
+ def get_request_extensions(self):
+ request_exts = []
- def _goose_handler(res):
+ def _goose_handler(req, res):
#NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
data = json.loads(res.body)
- data['flavor']['googoose'] = "Gooey goo for chewy chewing!"
- return data
+ data['flavor']['googoose'] = req.GET.get('chewing')
+ res.body = json.dumps(data)
+ return res
- resp_ext = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)',
+ req_ext1 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)',
_goose_handler)
- response_exts.append(resp_ext)
+ request_exts.append(req_ext1)
- def _bands_handler(res):
+ def _bands_handler(req, res):
#NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
data = json.loads(res.body)
data['big_bands'] = 'Pig Bands!'
- return data
+ res.body = json.dumps(data)
+ return res
- resp_ext2 = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)',
+ req_ext2 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)',
_bands_handler)
- response_exts.append(resp_ext2)
- return response_exts
+ request_exts.append(req_ext2)
+ return request_exts
def _add_tweedle(self, input_dict, req, id):
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 8b0729c35..bf51239e6 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -228,6 +228,9 @@ class FakeToken(object):
# FIXME(sirp): let's not use id here
id = 0
+ def __getitem__(self, key):
+ return getattr(self, key)
+
def __init__(self, **kwargs):
FakeToken.id += 1
self.id = FakeToken.id
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 481d34ed1..544298602 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -45,10 +45,10 @@ class StubController(nova.wsgi.Controller):
class StubExtensionManager(object):
- def __init__(self, resource_ext=None, action_ext=None, response_ext=None):
+ def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
self.resource_ext = resource_ext
self.action_ext = action_ext
- self.response_ext = response_ext
+ self.request_ext = request_ext
def get_name(self):
return "Tweedle Beetle Extension"
@@ -71,11 +71,11 @@ class StubExtensionManager(object):
action_exts.append(self.action_ext)
return action_exts
- def get_response_extensions(self):
- response_exts = []
- if self.response_ext:
- response_exts.append(self.response_ext)
- return response_exts
+ def get_request_extensions(self):
+ request_extensions = []
+ if self.request_ext:
+ request_extensions.append(self.request_ext)
+ return request_extensions
class ExtensionControllerTest(unittest.TestCase):
@@ -183,10 +183,10 @@ class ActionExtensionTest(unittest.TestCase):
self.assertEqual(404, response.status_int)
-class ResponseExtensionTest(unittest.TestCase):
+class RequestExtensionTest(unittest.TestCase):
def setUp(self):
- super(ResponseExtensionTest, self).setUp()
+ super(RequestExtensionTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
@@ -195,42 +195,39 @@ class ResponseExtensionTest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
- super(ResponseExtensionTest, self).tearDown()
+ super(RequestExtensionTest, self).tearDown()
def test_get_resources_with_stub_mgr(self):
- test_resp = "Gooey goo for chewy chewing!"
-
- def _resp_handler(res):
+ def _req_handler(req, res):
# only handle JSON responses
data = json.loads(res.body)
- data['flavor']['googoose'] = test_resp
- return data
+ data['flavor']['googoose'] = req.GET.get('chewing')
+ res.body = json.dumps(data)
+ return res
- resp_ext = extensions.ResponseExtension('GET',
+ req_ext = extensions.RequestExtension('GET',
'/v1.1/flavors/:(id)',
- _resp_handler)
+ _req_handler)
- manager = StubExtensionManager(None, None, resp_ext)
+ manager = StubExtensionManager(None, None, req_ext)
app = fakes.wsgi_app()
ext_midware = extensions.ExtensionMiddleware(app, manager)
- request = webob.Request.blank("/v1.1/flavors/1")
+ request = webob.Request.blank("/v1.1/flavors/1?chewing=bluegoo")
request.environ['api.version'] = '1.1'
response = request.get_response(ext_midware)
self.assertEqual(200, response.status_int)
response_data = json.loads(response.body)
- self.assertEqual(test_resp, response_data['flavor']['googoose'])
+ self.assertEqual('bluegoo', response_data['flavor']['googoose'])
def test_get_resources_with_mgr(self):
- test_resp = "Gooey goo for chewy chewing!"
-
app = fakes.wsgi_app()
ext_midware = extensions.ExtensionMiddleware(app)
- request = webob.Request.blank("/v1.1/flavors/1")
+ request = webob.Request.blank("/v1.1/flavors/1?chewing=newblue")
request.environ['api.version'] = '1.1'
response = request.get_response(ext_midware)
self.assertEqual(200, response.status_int)
response_data = json.loads(response.body)
- self.assertEqual(test_resp, response_data['flavor']['googoose'])
+ self.assertEqual('newblue', response_data['flavor']['googoose'])
self.assertEqual("Pig Bands!", response_data['big_bands'])
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index 45bd4d501..1bbe96612 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -27,6 +27,7 @@ import webob
from xml.dom.minidom import parseString
+import nova.context
from nova.api.openstack import limits
@@ -47,6 +48,13 @@ class BaseLimitTestSuite(unittest.TestCase):
self.time = 0.0
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(limits.Limit, "_get_time", self._get_time)
+ self.absolute_limits = {}
+
+ def stub_get_project_quotas(context, project_id):
+ return self.absolute_limits
+
+ self.stubs.Set(nova.quota, "get_project_quotas",
+ stub_get_project_quotas)
def tearDown(self):
"""Run after each test."""
@@ -75,6 +83,8 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
"action": "index",
"controller": "",
})
+ context = nova.context.RequestContext('testuser', 'testproject')
+ request.environ["nova.context"] = context
return request
def _populate_limits(self, request):
@@ -86,6 +96,18 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
request.environ["nova.limits"] = _limits
return request
+ def _setup_absolute_limits(self):
+ self.absolute_limits = {
+ 'instances': 5,
+ 'cores': 8,
+ 'ram': 2 ** 13,
+ 'volumes': 21,
+ 'gigabytes': 34,
+ 'metadata_items': 55,
+ 'injected_files': 89,
+ 'injected_file_content_bytes': 144,
+ }
+
def test_empty_index_json(self):
"""Test getting empty limit details in JSON."""
request = self._get_index_request()
@@ -103,6 +125,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
"""Test getting limit details in JSON."""
request = self._get_index_request()
request = self._populate_limits(request)
+ self._setup_absolute_limits()
response = request.get_response(self.controller)
expected = {
"limits": {
@@ -124,7 +147,15 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
"remaining": 5,
"unit": "HOUR",
}],
- "absolute": {},
+ "absolute": {
+ "maxTotalInstances": 5,
+ "maxTotalCores": 8,
+ "maxTotalRAMSize": 2 ** 13,
+ "maxServerMeta": 55,
+ "maxImageMeta": 55,
+ "maxPersonality": 89,
+ "maxPersonalitySize": 144,
+ },
},
}
body = json.loads(response.body)
@@ -188,6 +219,8 @@ class LimitsControllerV11Test(BaseLimitTestSuite):
"action": "index",
"controller": "",
})
+ context = nova.context.RequestContext('testuser', 'testproject')
+ request.environ["nova.context"] = context
return request
def _populate_limits(self, request):
@@ -218,6 +251,11 @@ class LimitsControllerV11Test(BaseLimitTestSuite):
"""Test getting limit details in JSON."""
request = self._get_index_request()
request = self._populate_limits(request)
+ self.absolute_limits = {
+ 'ram': 512,
+ 'instances': 5,
+ 'cores': 21,
+ }
response = request.get_response(self.controller)
expected = {
"limits": {
@@ -257,12 +295,59 @@ class LimitsControllerV11Test(BaseLimitTestSuite):
},
],
- "absolute": {},
+ "absolute": {
+ "maxTotalRAMSize": 512,
+ "maxTotalInstances": 5,
+ "maxTotalCores": 21,
+ },
},
}
body = json.loads(response.body)
self.assertEqual(expected, body)
+ def _test_index_absolute_limits_json(self, expected):
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ body = json.loads(response.body)
+ self.assertEqual(expected, body['limits']['absolute'])
+
+ def test_index_ignores_extra_absolute_limits_json(self):
+ self.absolute_limits = {'unknown_limit': 9001}
+ self._test_index_absolute_limits_json({})
+
+ def test_index_absolute_ram_json(self):
+ self.absolute_limits = {'ram': 1024}
+ self._test_index_absolute_limits_json({'maxTotalRAMSize': 1024})
+
+ def test_index_absolute_cores_json(self):
+ self.absolute_limits = {'cores': 17}
+ self._test_index_absolute_limits_json({'maxTotalCores': 17})
+
+ def test_index_absolute_instances_json(self):
+ self.absolute_limits = {'instances': 19}
+ self._test_index_absolute_limits_json({'maxTotalInstances': 19})
+
+ def test_index_absolute_metadata_json(self):
+ # NOTE: both server metadata and image metadata are overloaded
+ # into metadata_items
+ self.absolute_limits = {'metadata_items': 23}
+ expected = {
+ 'maxServerMeta': 23,
+ 'maxImageMeta': 23,
+ }
+ self._test_index_absolute_limits_json(expected)
+
+ def test_index_absolute_injected_files(self):
+ self.absolute_limits = {
+ 'injected_files': 17,
+ 'injected_file_content_bytes': 86753,
+ }
+ expected = {
+ 'maxPersonality': 17,
+ 'maxPersonalitySize': 86753,
+ }
+ self._test_index_absolute_limits_json(expected)
+
class LimitMiddlewareTest(BaseLimitTestSuite):
"""
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 89edece42..fbde5c9ce 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -138,6 +138,16 @@ def find_host(self, context, instance_id):
return "nova"
+class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance_id, password):
+ self.instance_id = instance_id
+ self.password = password
+
+
class ServersTest(test.TestCase):
def setUp(self):
@@ -183,7 +193,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
- def test_get_server_by_id_v11(self):
+ def test_get_server_by_id_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
@@ -246,7 +256,7 @@ class ServersTest(test.TestCase):
self.assertEqual(len(addresses["private"]), 1)
self.assertEqual(addresses["private"][0], private)
- def test_get_server_addresses_V10(self):
+ def test_get_server_addresses_v1_0(self):
private = '192.168.0.3'
public = ['1.2.3.4']
new_return_server = return_server_with_addresses(private, public)
@@ -257,7 +267,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict, {
'addresses': {'public': public, 'private': [private]}})
- def test_get_server_addresses_xml_V10(self):
+ def test_get_server_addresses_xml_v1_0(self):
private_expected = "192.168.0.3"
public_expected = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private_expected,
@@ -276,7 +286,7 @@ class ServersTest(test.TestCase):
(ip,) = private.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private_expected)
- def test_get_server_addresses_public_V10(self):
+ def test_get_server_addresses_public_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -286,7 +296,7 @@ class ServersTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {'public': public})
- def test_get_server_addresses_private_V10(self):
+ def test_get_server_addresses_private_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -296,7 +306,7 @@ class ServersTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {'private': [private]})
- def test_get_server_addresses_public_xml_V10(self):
+ def test_get_server_addresses_public_xml_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -310,7 +320,7 @@ class ServersTest(test.TestCase):
(ip,) = public_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), public[0])
- def test_get_server_addresses_private_xml_V10(self):
+ def test_get_server_addresses_private_xml_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -324,7 +334,7 @@ class ServersTest(test.TestCase):
(ip,) = private_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private)
- def test_get_server_by_id_with_addresses_v11(self):
+ def test_get_server_by_id_with_addresses_v1_1(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -354,7 +364,7 @@ class ServersTest(test.TestCase):
self.assertEqual(s.get('imageId', None), None)
i += 1
- def test_get_server_list_v11(self):
+ def test_get_server_list_v1_1(self):
req = webob.Request.blank('/v1.1/servers')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
@@ -576,16 +586,16 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_create_instance_v11(self):
+ def test_create_instance_v1_1(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/2'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/2'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': imageRef,
- 'flavorRef': flavorRef,
+ 'imageRef': image_ref,
+ 'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
@@ -605,17 +615,17 @@ class ServersTest(test.TestCase):
self.assertEqual(16, len(server['adminPass']))
self.assertEqual('server_test', server['name'])
self.assertEqual(1, server['id'])
- self.assertEqual(flavorRef, server['flavorRef'])
- self.assertEqual(imageRef, server['imageRef'])
+ self.assertEqual(flavor_ref, server['flavorRef'])
+ self.assertEqual(image_ref, server['imageRef'])
self.assertEqual(res.status_int, 200)
- def test_create_instance_v11_bad_href(self):
+ def test_create_instance_v1_1_bad_href(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/asdf'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/asdf'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = dict(server=dict(
- name='server_test', imageRef=imageRef, flavorRef=flavorRef,
+ name='server_test', imageRef=image_ref, flavorRef=flavor_ref,
metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.1/servers')
@@ -625,17 +635,17 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_create_instance_v11_local_href(self):
+ def test_create_instance_v1_1_local_href(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/2'
- imageRefLocal = '2'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/2'
+ image_ref_local = '2'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': imageRefLocal,
- 'flavorRef': flavorRef,
+ 'imageRef': image_ref_local,
+ 'flavorRef': flavor_ref,
},
}
@@ -648,11 +658,11 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(1, server['id'])
- self.assertEqual(flavorRef, server['flavorRef'])
- self.assertEqual(imageRef, server['imageRef'])
+ self.assertEqual(flavor_ref, server['flavorRef'])
+ self.assertEqual(image_ref, server['imageRef'])
self.assertEqual(res.status_int, 200)
- def test_create_instance_with_admin_pass_v10(self):
+ def test_create_instance_with_admin_pass_v1_0(self):
self._setup_for_create_instance()
body = {
@@ -673,16 +683,16 @@ class ServersTest(test.TestCase):
self.assertNotEqual(res['server']['adminPass'],
body['server']['adminPass'])
- def test_create_instance_with_admin_pass_v11(self):
+ def test_create_instance_with_admin_pass_v1_1(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/2'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/2'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': imageRef,
- 'flavorRef': flavorRef,
+ 'imageRef': image_ref,
+ 'flavorRef': flavor_ref,
'adminPass': 'testpass',
},
}
@@ -695,16 +705,16 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(server['adminPass'], body['server']['adminPass'])
- def test_create_instance_with_empty_admin_pass_v11(self):
+ def test_create_instance_with_empty_admin_pass_v1_1(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/2'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/2'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': imageRef,
- 'flavorRef': flavorRef,
+ 'imageRef': image_ref,
+ 'flavorRef': flavor_ref,
'adminPass': '',
},
}
@@ -758,14 +768,13 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_update_server_v10(self):
+ def test_update_server_v1_0(self):
inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
def server_update(context, id, params):
filtered_dict = dict(
- display_name='server_test',
- admin_pass='bacon',
+ display_name='server_test'
)
self.assertEqual(params, filtered_dict)
return filtered_dict
@@ -773,6 +782,8 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
+ mock_method = MockSetAdminPassword()
+ self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
@@ -780,8 +791,10 @@ class ServersTest(test.TestCase):
req.body = self.body
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 204)
+ self.assertEqual(mock_method.instance_id, '1')
+ self.assertEqual(mock_method.password, 'bacon')
- def test_update_server_adminPass_ignored_v11(self):
+ def test_update_server_adminPass_ignored_v1_1(self):
inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
@@ -822,7 +835,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 501)
- def test_server_backup_schedule_deprecated_v11(self):
+ def test_server_backup_schedule_deprecated_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1/backup_schedule')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
@@ -996,16 +1009,6 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 501)
def test_server_change_password_v1_1(self):
-
- class MockSetAdminPassword(object):
- def __init__(self):
- self.instance_id = None
- self.password = None
-
- def __call__(self, context, instance_id, password):
- self.instance_id = instance_id
- self.password = password
-
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
@@ -1113,7 +1116,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_server_rebuild_accepted_minimum_v11(self):
+ def test_server_rebuild_accepted_minimum_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@@ -1128,7 +1131,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
- def test_server_rebuild_rejected_when_building_v11(self):
+ def test_server_rebuild_rejected_when_building_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@@ -1147,7 +1150,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 409)
- def test_server_rebuild_accepted_with_metadata_v11(self):
+ def test_server_rebuild_accepted_with_metadata_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@@ -1165,7 +1168,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
- def test_server_rebuild_accepted_with_bad_metadata_v11(self):
+ def test_server_rebuild_accepted_with_bad_metadata_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@@ -1181,7 +1184,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_server_rebuild_bad_entity_v11(self):
+ def test_server_rebuild_bad_entity_v1_1(self):
body = {
"rebuild": {
"imageId": 2,
@@ -1196,7 +1199,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_server_rebuild_bad_personality_v11(self):
+ def test_server_rebuild_bad_personality_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@@ -1215,7 +1218,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_server_rebuild_personality_v11(self):
+ def test_server_rebuild_personality_v1_1(self):
body = {
"rebuild": {
"imageRef": "http://localhost/images/2",
@@ -1654,6 +1657,19 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
request = self.deserializer.deserialize(serial_request)
self.assertEqual(request, expected)
+ def test_request_xmlser_with_flavor_image_ref(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="http://localhost:8774/v1.1/images/1"
+ flavorRef="http://localhost:8774/v1.1/flavors/1">
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ self.assertEquals(request["server"]["flavorRef"],
+ "http://localhost:8774/v1.1/flavors/1")
+ self.assertEquals(request["server"]["imageRef"],
+ "http://localhost:8774/v1.1/images/1")
+
class TestServerInstanceCreation(test.TestCase):
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index 5d5799b59..fa2e05033 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -20,6 +20,8 @@ import json
import nova.db
from nova import context
+from nova import crypto
+from nova import exception
from nova import flags
from nova import test
from nova.api.openstack import zones
@@ -79,6 +81,18 @@ def zone_capabilities(method, context):
return dict()
+GLOBAL_BUILD_PLAN = [
+ dict(name='host1', weight=10, ip='10.0.0.1', zone='zone1'),
+ dict(name='host2', weight=9, ip='10.0.0.2', zone='zone2'),
+ dict(name='host3', weight=8, ip='10.0.0.3', zone='zone3'),
+ dict(name='host4', weight=7, ip='10.0.0.4', zone='zone4'),
+ ]
+
+
+def zone_select(context, specs):
+ return GLOBAL_BUILD_PLAN
+
+
class ZonesTest(test.TestCase):
def setUp(self):
super(ZonesTest, self).setUp()
@@ -190,3 +204,31 @@ class ZonesTest(test.TestCase):
self.assertEqual(res_dict['zone']['name'], 'darksecret')
self.assertEqual(res_dict['zone']['cap1'], 'a;b')
self.assertEqual(res_dict['zone']['cap2'], 'c;d')
+
+ def test_zone_select(self):
+ FLAGS.build_plan_encryption_key = 'c286696d887c9aa0611bbb3e2025a45a'
+ self.stubs.Set(api, 'select', zone_select)
+
+ req = webob.Request.blank('/v1.0/zones/select')
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+
+ self.assertTrue('weights' in res_dict)
+
+ for item in res_dict['weights']:
+ blob = item['blob']
+ decrypt = crypto.decryptor(FLAGS.build_plan_encryption_key)
+ secret_item = json.loads(decrypt(blob))
+ found = False
+ for original_item in GLOBAL_BUILD_PLAN:
+ if original_item['name'] != secret_item['name']:
+ continue
+ found = True
+ for key in ('weight', 'ip', 'zone'):
+ self.assertEqual(secret_item[key], original_item[key])
+
+ self.assertTrue(found)
+ self.assertEqual(len(item), 2)
+ self.assertTrue('weight' in item)
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index 58d251b1e..8bdea359a 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -124,7 +124,6 @@ def stub_out_db_instance_api(stubs, injected=True):
return FakeModel(vlan_network_fields)
else:
return FakeModel(flat_network_fields)
- return FakeModel(network_fields)
def fake_network_get_all_by_instance(context, instance_id):
# Even instance numbers are on vlan networks
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 5d7ca98b5..ecefc464a 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -21,24 +21,24 @@ from nova import flags
FLAGS = flags.FLAGS
flags.DECLARE('volume_driver', 'nova.volume.manager')
-FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
-FLAGS.connection_type = 'fake'
-FLAGS.fake_rabbit = True
+FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver')
+FLAGS['connection_type'].SetDefault('fake')
+FLAGS['fake_rabbit'].SetDefault(True)
flags.DECLARE('auth_driver', 'nova.auth.manager')
-FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
+FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver')
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
-FLAGS.network_size = 8
-FLAGS.num_networks = 2
-FLAGS.fake_network = True
-FLAGS.image_service = 'nova.image.local.LocalImageService'
+FLAGS['network_size'].SetDefault(8)
+FLAGS['num_networks'].SetDefault(2)
+FLAGS['fake_network'].SetDefault(True)
+FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService')
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
-FLAGS.num_shelves = 2
-FLAGS.blades_per_shelf = 4
-FLAGS.iscsi_num_targets = 8
-FLAGS.verbose = True
-FLAGS.sqlite_db = "tests.sqlite"
-FLAGS.use_ipv6 = True
+FLAGS['num_shelves'].SetDefault(2)
+FLAGS['blades_per_shelf'].SetDefault(4)
+FLAGS['iscsi_num_targets'].SetDefault(8)
+FLAGS['verbose'].SetDefault(True)
+FLAGS['sqlite_db'].SetDefault("tests.sqlite")
+FLAGS['use_ipv6'].SetDefault(True)
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 2e5d67017..bc98921f0 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -160,7 +160,7 @@ class _IntegratedTestBase(test.TestCase):
#self.start_service('network')
self.start_service('scheduler')
- self.auth_url = self._start_api_service()
+ self._start_api_service()
self.context = IntegratedUnitTestContext(self.auth_url)
@@ -174,8 +174,10 @@ class _IntegratedTestBase(test.TestCase):
if not api_service:
raise Exception("API Service was None")
- auth_url = 'http://localhost:8774/v1.1'
- return auth_url
+ self.api_service = api_service
+
+ host, port = api_service.get_socket_info('osapi')
+ self.auth_url = 'http://%s:%s/v1.1' % (host, port)
def tearDown(self):
self.context.cleanup()
@@ -184,6 +186,11 @@ class _IntegratedTestBase(test.TestCase):
def _get_flags(self):
"""An opportunity to setup flags, before the services are started."""
f = {}
+
+ # Auto-assign ports to allow concurrent tests
+ f['ec2_listen_port'] = 0
+ f['osapi_listen_port'] = 0
+
f['image_service'] = 'nova.image.fake.FakeImageService'
f['fake_network'] = True
return f
diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py
index 988a1de72..b06271c99 100644
--- a/nova/tests/network/base.py
+++ b/nova/tests/network/base.py
@@ -25,6 +25,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import ipv6
from nova import log as logging
from nova import test
from nova import utils
@@ -117,15 +118,15 @@ class NetworkTestCase(test.TestCase):
context.get_admin_context(),
instance_ref['id'])
self.assertEqual(instance_ref['mac_address'],
- utils.to_mac(address_v6))
+ ipv6.to_mac(address_v6))
instance_ref2 = db.fixed_ip_get_instance_v6(
context.get_admin_context(),
address_v6)
self.assertEqual(instance_ref['id'], instance_ref2['id'])
self.assertEqual(address_v6,
- utils.to_global_ipv6(
- network_ref['cidr_v6'],
- instance_ref['mac_address']))
+ ipv6.to_global(network_ref['cidr_v6'],
+ instance_ref['mac_address'],
+ 'test'))
self._deallocate_address(0, address)
db.instance_destroy(context.get_admin_context(),
instance_ref['id'])
diff --git a/nova/tests/public_key/dummy.fingerprint b/nova/tests/public_key/dummy.fingerprint
new file mode 100644
index 000000000..715bca27a
--- /dev/null
+++ b/nova/tests/public_key/dummy.fingerprint
@@ -0,0 +1 @@
+1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df
diff --git a/nova/tests/public_key/dummy.pub b/nova/tests/public_key/dummy.pub
new file mode 100644
index 000000000..d4cf2bc0d
--- /dev/null
+++ b/nova/tests/public_key/dummy.pub
@@ -0,0 +1 @@
+ssh-dss AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index fa0e56597..7c0331eff 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -28,10 +28,12 @@ import StringIO
import webob
from nova import context
+from nova import exception
from nova import test
from nova.api import ec2
-from nova.api.ec2 import cloud
from nova.api.ec2 import apirequest
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
from nova.auth import manager
@@ -101,6 +103,21 @@ class XmlConversionTestCase(test.TestCase):
self.assertEqual(conv('-0'), 0)
+class Ec2utilsTestCase(test.TestCase):
+ def test_ec2_id_to_id(self):
+ self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
+ self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
+
+ def test_bad_ec2_id(self):
+ self.assertRaises(exception.InvalidEc2Id,
+ ec2utils.ec2_id_to_id,
+ 'badone')
+
+ def test_id_to_ec2_id(self):
+ self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
+ self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
+
+
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
@@ -207,6 +224,29 @@ class ApiEc2TestCase(test.TestCase):
self.manager.delete_project(project)
self.manager.delete_user(user)
+ def test_create_duplicate_key_pair(self):
+ """Test that, after successfully generating a keypair,
+ requesting a second keypair with the same name fails sanely"""
+ self.expect_http()
+ self.mox.ReplayAll()
+ keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
+ for x in range(random.randint(4, 8)))
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+ # NOTE(vish): create depends on pool, so call helper directly
+ self.ec2.create_key_pair('test')
+
+ try:
+ self.ec2.create_key_pair('test')
+ except EC2ResponseError, e:
+ if e.code == 'KeyPairExists':
+ pass
+ else:
+ self.fail("Unexpected EC2ResponseError: %s "
+ "(expected KeyPairExists)" % e.code)
+ else:
+ self.fail('Exception not raised.')
+
def test_get_all_security_groups(self):
"""Test that we can retrieve security groups"""
self.expect_http()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index f271c03f2..54c0454de 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -279,6 +279,26 @@ class CloudTestCase(test.TestCase):
user_group=['all'])
self.assertEqual(True, result['is_public'])
+ def test_deregister_image(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(local.LocalImageService, 'delete', fake_delete)
+ # valid image
+ result = deregister_image(self.context, 'ami-00000001')
+ self.assertEqual(result['imageId'], 'ami-00000001')
+ # invalid image
+ self.stubs.UnsetAll()
+
+ def fake_detail_empty(self, context):
+ return []
+
+ self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty)
+ self.assertRaises(exception.ImageNotFound, deregister_image,
+ self.context, 'ami-bad001')
+
def test_console_output(self):
instance_type = FLAGS.default_instance_type
max_count = 1
@@ -334,45 +354,40 @@ class CloudTestCase(test.TestCase):
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
+ def test_import_public_key(self):
+ # test when user provides all values
+ result1 = self.cloud.import_public_key(self.context,
+ 'testimportkey1',
+ 'mytestpubkey',
+ 'mytestfprint')
+ self.assertTrue(result1)
+ keydata = db.key_pair_get(self.context,
+ self.context.user.id,
+ 'testimportkey1')
+ self.assertEqual('mytestpubkey', keydata['public_key'])
+ self.assertEqual('mytestfprint', keydata['fingerprint'])
+ # test when user omits fingerprint
+ pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+ f = open(pubkey_path + '/dummy.pub', 'r')
+ dummypub = f.readline().rstrip()
+ f.close()
+ f = open(pubkey_path + '/dummy.fingerprint', 'r')
+ dummyfprint = f.readline().rstrip()
+ f.close()
+ result2 = self.cloud.import_public_key(self.context,
+ 'testimportkey2',
+ dummypub)
+ self.assertTrue(result2)
+ keydata = db.key_pair_get(self.context,
+ self.context.user.id,
+ 'testimportkey2')
+ self.assertEqual(dummypub, keydata['public_key'])
+ self.assertEqual(dummyfprint, keydata['fingerprint'])
+
def test_delete_key_pair(self):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
- def test_run_instances(self):
- if FLAGS.connection_type == 'fake':
- LOG.debug(_("Can't test instances without a real virtual env."))
- return
- image_id = FLAGS.default_image
- instance_type = FLAGS.default_instance_type
- max_count = 1
- kwargs = {'image_id': image_id,
- 'instance_type': instance_type,
- 'max_count': max_count}
- rv = self.cloud.run_instances(self.context, **kwargs)
- # TODO: check for proper response
- instance_id = rv['reservationSet'][0].keys()[0]
- instance = rv['reservationSet'][0][instance_id][0]
- LOG.debug(_("Need to watch instance %s until it's running..."),
- instance['instance_id'])
- while True:
- greenthread.sleep(1)
- info = self.cloud._get_instance(instance['instance_id'])
- LOG.debug(info['state'])
- if info['state'] == power_state.RUNNING:
- break
- self.assert_(rv)
-
- if FLAGS.connection_type != 'fake':
- time.sleep(45) # Should use boto for polling here
- for reservations in rv['reservationSet']:
- # for res_id in reservations.keys():
- # LOG.debug(reservations[res_id])
- # for instance in reservations[res_id]:
- for instance in reservations[reservations.keys()[0]]:
- instance_id = instance['instance_id']
- LOG.debug(_("Terminating instance %s"), instance_id)
- rv = self.compute.terminate_instance(instance_id)
-
def test_terminate_instances(self):
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_id': 1,
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 393110791..9170837b6 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -21,6 +21,7 @@ Tests For Compute
import datetime
import mox
+import stubout
from nova import compute
from nova import context
@@ -52,6 +53,10 @@ class FakeTime(object):
self.counter += t
+def nop_report_driver_status(self):
+ pass
+
+
class ComputeTestCase(test.TestCase):
"""Test case for compute"""
def setUp(self):
@@ -329,6 +334,28 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
+ def test_finish_resize(self):
+ """Contrived test to ensure finish_resize doesn't raise anything"""
+
+ def fake(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.compute.driver, 'finish_resize', fake)
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+ self.compute.prep_resize(context, instance_id, 1)
+ migration_ref = db.migration_get_by_instance_and_status(context,
+ instance_id, 'pre-migrating')
+ try:
+ self.compute.finish_resize(context, instance_id,
+ int(migration_ref['id']), {})
+ except KeyError, e:
+ # Only catch key errors. We want other reasons for the test to
+ # fail to actually error out so we don't obscure anything
+ self.fail()
+
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()
@@ -649,6 +676,10 @@ class ComputeTestCase(test.TestCase):
def test_run_kill_vm(self):
"""Detect when a vm is terminated behind the scenes"""
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(compute_manager.ComputeManager,
+ '_report_driver_status', nop_report_driver_status)
+
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py
new file mode 100644
index 000000000..945d78794
--- /dev/null
+++ b/nova/tests/test_crypto.py
@@ -0,0 +1,48 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests for Crypto module.
+"""
+
+from nova import crypto
+from nova import test
+
+
+class SymmetricKeyTestCase(test.TestCase):
+ """Test case for Encrypt/Decrypt"""
+ def test_encrypt_decrypt(self):
+ key = 'c286696d887c9aa0611bbb3e2025a45a'
+ plain_text = "The quick brown fox jumped over the lazy dog."
+
+ # No IV supplied (all 0's)
+ encrypt = crypto.encryptor(key)
+ cipher_text = encrypt(plain_text)
+ self.assertNotEquals(plain_text, cipher_text)
+
+ decrypt = crypto.decryptor(key)
+ plain = decrypt(cipher_text)
+
+ self.assertEquals(plain_text, plain)
+
+ # IV supplied ...
+ iv = '562e17996d093d28ddb3ba695a2e6f58'
+ encrypt = crypto.encryptor(key, iv)
+ cipher_text = encrypt(plain_text)
+ self.assertNotEquals(plain_text, cipher_text)
+
+ decrypt = crypto.decryptor(key, iv)
+ plain = decrypt(cipher_text)
+
+ self.assertEquals(plain_text, plain)
diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py
index 707300fcf..05319d91f 100644
--- a/nova/tests/test_flags.py
+++ b/nova/tests/test_flags.py
@@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase):
self.assert_('runtime_answer' in self.global_FLAGS)
self.assertEqual(self.global_FLAGS.runtime_answer, 60)
+ def test_long_vs_short_flags(self):
+ flags.DEFINE_string('duplicate_answer_long', 'val', 'desc',
+ flag_values=self.global_FLAGS)
+ argv = ['flags_test', '--duplicate_answer=60', 'extra_arg']
+ args = self.global_FLAGS(argv)
+
+ self.assert_('duplicate_answer' not in self.global_FLAGS)
+ self.assert_(self.global_FLAGS.duplicate_answer_long, 60)
+
+ flags.DEFINE_integer('duplicate_answer', 60, 'desc',
+ flag_values=self.global_FLAGS)
+ self.assertEqual(self.global_FLAGS.duplicate_answer, 60)
+ self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val')
+
def test_flag_leak_left(self):
self.assertEqual(FLAGS.flags_unittest, 'foo')
FLAGS.flags_unittest = 'bar'
diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py
index 31e40ae1d..c029d41e6 100644
--- a/nova/tests/test_host_filter.py
+++ b/nova/tests/test_host_filter.py
@@ -43,16 +43,16 @@ class HostFilterTestCase(test.TestCase):
# which means ... don't go above 10 hosts.
return {'host_name-description': 'XenServer %s' % multiplier,
'host_hostname': 'xs-%s' % multiplier,
- 'host_memory': {'total': 100,
- 'overhead': 10,
- 'free': 10 + multiplier * 10,
- 'free-computed': 10 + multiplier * 10},
+ 'host_memory_total': 100,
+ 'host_memory_overhead': 10,
+ 'host_memory_free': 10 + multiplier * 10,
+ 'host_memory_free-computed': 10 + multiplier * 10,
'host_other-config': {},
'host_ip_address': '192.168.1.%d' % (100 + multiplier),
'host_cpu_info': {},
- 'disk': {'available': 100 + multiplier * 100,
- 'total': 1000,
- 'used': 0},
+ 'disk_available': 100 + multiplier * 100,
+ 'disk_total': 1000,
+ 'disk_used': 0,
'host_uuid': 'xxx-%d' % multiplier,
'host_name-label': 'xs-%s' % multiplier}
@@ -131,12 +131,12 @@ class HostFilterTestCase(test.TestCase):
raw = ['or',
['and',
- ['<', '$compute.host_memory.free', 30],
- ['<', '$compute.disk.available', 300]
+ ['<', '$compute.host_memory_free', 30],
+ ['<', '$compute.disk_available', 300]
],
['and',
- ['>', '$compute.host_memory.free', 70],
- ['>', '$compute.disk.available', 700]
+ ['>', '$compute.host_memory_free', 70],
+ ['>', '$compute.disk_available', 700]
]
]
cooked = json.dumps(raw)
@@ -149,7 +149,7 @@ class HostFilterTestCase(test.TestCase):
self.assertEquals('host%02d' % index, host)
raw = ['not',
- ['=', '$compute.host_memory.free', 30],
+ ['=', '$compute.host_memory_free', 30],
]
cooked = json.dumps(raw)
hosts = driver.filter_hosts(self.zone_manager, cooked)
@@ -160,7 +160,7 @@ class HostFilterTestCase(test.TestCase):
for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
self.assertEquals('host%02d' % index, host)
- raw = ['in', '$compute.host_memory.free', 20, 40, 60, 80, 100]
+ raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
cooked = json.dumps(raw)
hosts = driver.filter_hosts(self.zone_manager, cooked)
diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py
new file mode 100644
index 000000000..11dc2ec98
--- /dev/null
+++ b/nova/tests/test_ipv6.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test suite for IPv6."""
+
+from nova import flags
+from nova import ipv6
+from nova import log as logging
+from nova import test
+
+LOG = logging.getLogger('nova.tests.test_ipv6')
+
+FLAGS = flags.FLAGS
+
+import sys
+
+
+class IPv6RFC2462TestCase(test.TestCase):
+ """Unit tests for IPv6 rfc2462 backend operations."""
+ def setUp(self):
+ super(IPv6RFC2462TestCase, self).setUp()
+ self.flags(ipv6_backend='rfc2462')
+ ipv6.reset_backend()
+
+ def test_to_global(self):
+ addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test')
+ self.assertEquals(addr, '2001:db8::16:3eff:fe33:4455')
+
+ def test_to_mac(self):
+ mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455')
+ self.assertEquals(mac, '00:16:3e:33:44:55')
+
+
+class IPv6AccountIdentiferTestCase(test.TestCase):
+ """Unit tests for IPv6 account_identifier backend operations."""
+ def setUp(self):
+ super(IPv6AccountIdentiferTestCase, self).setUp()
+ self.flags(ipv6_backend='account_identifier')
+ ipv6.reset_backend()
+
+ def test_to_global(self):
+ addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test')
+ self.assertEquals(addr, '2001:db8::a94a:8fe5:ff33:4455')
+
+ def test_to_mac(self):
+ mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455')
+ self.assertEquals(mac, '02:16:3e:33:44:55')
diff --git a/nova/tests/test_virt.py b/nova/tests/test_libvirt.py
index c4fcc21cc..d9316ab4f 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_libvirt.py
@@ -32,7 +32,8 @@ from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
from nova.compute import power_state
-from nova.virt import libvirt_conn
+from nova.virt.libvirt import connection
+from nova.virt.libvirt import firewall
libvirt = None
FLAGS = flags.FLAGS
@@ -83,7 +84,7 @@ class CacheConcurrencyTestCase(test.TestCase):
def test_same_fname_concurrency(self):
"""Ensures that the same fname cache runs at a sequentially"""
- conn = libvirt_conn.LibvirtConnection
+ conn = connection.LibvirtConnection
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
@@ -104,7 +105,7 @@ class CacheConcurrencyTestCase(test.TestCase):
def test_different_fname_concurrency(self):
"""Ensures that two different fname caches are concurrent"""
- conn = libvirt_conn.LibvirtConnection
+ conn = connection.LibvirtConnection
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
eventlet.spawn(conn._cache_image, _concurrency,
@@ -125,7 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase):
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
- libvirt_conn._late_load_cheetah()
+ connection._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
@@ -172,8 +173,8 @@ class LibvirtConnTestCase(test.TestCase):
return False
global libvirt
libvirt = __import__('libvirt')
- libvirt_conn.libvirt = __import__('libvirt')
- libvirt_conn.libxml2 = __import__('libxml2')
+ connection.libvirt = __import__('libvirt')
+ connection.libxml2 = __import__('libxml2')
return True
def create_fake_libvirt_mock(self, **kwargs):
@@ -183,7 +184,7 @@ class LibvirtConnTestCase(test.TestCase):
class FakeLibvirtConnection(object):
pass
- # A fake libvirt_conn.IptablesFirewallDriver
+ # A fake connection.IptablesFirewallDriver
class FakeIptablesFirewallDriver(object):
def __init__(self, **kwargs):
@@ -199,11 +200,11 @@ class LibvirtConnTestCase(test.TestCase):
for key, val in kwargs.items():
fake.__setattr__(key, val)
- # Inevitable mocks for libvirt_conn.LibvirtConnection
- self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
- libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
- libvirt_conn.LibvirtConnection._conn = fake
+ # Inevitable mocks for connection.LibvirtConnection
+ self.mox.StubOutWithMock(connection.utils, 'import_class')
+ connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+ self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+ connection.LibvirtConnection._conn = fake
def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
@@ -215,7 +216,7 @@ class LibvirtConnTestCase(test.TestCase):
return db.service_create(context.get_admin_context(), service_ref)
def test_preparing_xml_info(self):
- conn = libvirt_conn.LibvirtConnection(True)
+ conn = connection.LibvirtConnection(True)
instance_ref = db.instance_create(self.context, self.test_instance)
result = conn._prepare_xml_info(instance_ref, False)
@@ -230,7 +231,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(len(result['nics']) == 2)
def test_get_nic_for_xml_v4(self):
- conn = libvirt_conn.LibvirtConnection(True)
+ conn = connection.LibvirtConnection(True)
network, mapping = _create_network_info()[0]
self.flags(use_ipv6=False)
params = conn._get_nic_for_xml(network, mapping)['extra_params']
@@ -238,7 +239,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(params.find('PROJMASKV6') == -1)
def test_get_nic_for_xml_v6(self):
- conn = libvirt_conn.LibvirtConnection(True)
+ conn = connection.LibvirtConnection(True)
network, mapping = _create_network_info()[0]
self.flags(use_ipv6=True)
params = conn._get_nic_for_xml(network, mapping)['extra_params']
@@ -345,7 +346,7 @@ class LibvirtConnTestCase(test.TestCase):
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _create_network_info(2)
- conn = libvirt_conn.LibvirtConnection(True)
+ conn = connection.LibvirtConnection(True)
instance_ref = db.instance_create(self.context, instance_data)
xml = conn.to_xml(instance_ref, False, network_info)
tree = xml_to_tree(xml)
@@ -376,7 +377,7 @@ class LibvirtConnTestCase(test.TestCase):
'instance_id': instance_ref['id']})
self.flags(libvirt_type='lxc')
- conn = libvirt_conn.LibvirtConnection(True)
+ conn = connection.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, 'lxc:///')
@@ -482,7 +483,7 @@ class LibvirtConnTestCase(test.TestCase):
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
- conn = libvirt_conn.LibvirtConnection(True)
+ conn = connection.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, expected_uri)
@@ -509,7 +510,7 @@ class LibvirtConnTestCase(test.TestCase):
FLAGS.libvirt_uri = testuri
for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
- conn = libvirt_conn.LibvirtConnection(True)
+ conn = connection.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
@@ -533,13 +534,13 @@ class LibvirtConnTestCase(test.TestCase):
self.create_fake_libvirt_mock(getVersion=getVersion,
getType=getType,
listDomainsID=listDomainsID)
- self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ self.mox.StubOutWithMock(connection.LibvirtConnection,
'get_cpu_info')
- libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
+ connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
# Start test
self.mox.ReplayAll()
- conn = libvirt_conn.LibvirtConnection(False)
+ conn = connection.LibvirtConnection(False)
conn.update_available_resource(self.context, 'dummy')
service_ref = db.service_get(self.context, service_ref['id'])
compute_node = service_ref['compute_node'][0]
@@ -573,7 +574,7 @@ class LibvirtConnTestCase(test.TestCase):
self.create_fake_libvirt_mock()
self.mox.ReplayAll()
- conn = libvirt_conn.LibvirtConnection(False)
+ conn = connection.LibvirtConnection(False)
self.assertRaises(exception.ComputeServiceUnavailable,
conn.update_available_resource,
self.context, 'dummy')
@@ -608,7 +609,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
try:
- conn = libvirt_conn.LibvirtConnection(False)
+ conn = connection.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
conn.firewall_driver.setattr('instance_filter_exists', fake_none)
@@ -657,7 +658,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
- conn = libvirt_conn.LibvirtConnection(False)
+ conn = connection.LibvirtConnection(False)
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', '',
@@ -686,7 +687,7 @@ class LibvirtConnTestCase(test.TestCase):
# Start test
self.mox.ReplayAll()
- conn = libvirt_conn.LibvirtConnection(False)
+ conn = connection.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
@@ -705,12 +706,12 @@ class LibvirtConnTestCase(test.TestCase):
try:
conn.spawn(instance, network_info)
except Exception, e:
- count = (0 <= e.message.find('Unexpected method call'))
+ count = (0 <= str(e.message).find('Unexpected method call'))
self.assertTrue(count)
def test_get_host_ip_addr(self):
- conn = libvirt_conn.LibvirtConnection(False)
+ conn = connection.LibvirtConnection(False)
ip = conn.get_host_ip_addr()
self.assertEquals(ip, FLAGS.my_ip)
@@ -734,7 +735,7 @@ class IptablesFirewallTestCase(test.TestCase):
class FakeLibvirtConnection(object):
pass
self.fake_libvirt_connection = FakeLibvirtConnection()
- self.fw = libvirt_conn.IptablesFirewallDriver(
+ self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
def tearDown(self):
@@ -912,7 +913,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertEquals(len(rulesv4), 2)
self.assertEquals(len(rulesv6), 0)
- def multinic_iptables_test(self):
+ def test_multinic_iptables(self):
ipv4_rules_per_network = 2
ipv6_rules_per_network = 3
networks_count = 5
@@ -932,6 +933,16 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_network * networks_count)
+ def test_do_refresh_security_group_rules(self):
+ instance_ref = self._create_instance_ref()
+ self.mox.StubOutWithMock(self.fw,
+ 'add_filters_for_instance',
+ use_mock_anything=True)
+ self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg())
+ self.fw.instances[instance_ref['id']] = instance_ref
+ self.mox.ReplayAll()
+ self.fw.do_refresh_security_group_rules("fake")
+
class NWFilterTestCase(test.TestCase):
def setUp(self):
@@ -948,7 +959,7 @@ class NWFilterTestCase(test.TestCase):
self.fake_libvirt_connection = Mock()
- self.fw = libvirt_conn.NWFilterFirewall(
+ self.fw = firewall.NWFilterFirewall(
lambda: self.fake_libvirt_connection)
def tearDown(self):
diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py
new file mode 100644
index 000000000..b6b0fcc68
--- /dev/null
+++ b/nova/tests/test_notifier.py
@@ -0,0 +1,117 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import nova
+
+from nova import context
+from nova import flags
+from nova import rpc
+import nova.notifier.api
+from nova.notifier.api import notify
+from nova.notifier import no_op_notifier
+from nova.notifier import rabbit_notifier
+from nova import test
+
+import stubout
+
+
+class NotifierTestCase(test.TestCase):
+ """Test case for notifications"""
+ def setUp(self):
+ super(NotifierTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(NotifierTestCase, self).tearDown()
+
+ def test_send_notification(self):
+ self.notify_called = False
+
+ def mock_notify(cls, *args):
+ self.notify_called = True
+
+ self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
+ mock_notify)
+
+ class Mock(object):
+ pass
+ notify('publisher_id', 'event_type',
+ nova.notifier.api.WARN, dict(a=3))
+ self.assertEqual(self.notify_called, True)
+
+ def test_verify_message_format(self):
+ """A test to ensure changing the message format is prohibitively
+ annoying"""
+
+ def message_assert(message):
+ fields = [('publisher_id', 'publisher_id'),
+ ('event_type', 'event_type'),
+ ('priority', 'WARN'),
+ ('payload', dict(a=3))]
+ for k, v in fields:
+ self.assertEqual(message[k], v)
+ self.assertTrue(len(message['message_id']) > 0)
+ self.assertTrue(len(message['timestamp']) > 0)
+
+ self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
+ message_assert)
+ notify('publisher_id', 'event_type',
+ nova.notifier.api.WARN, dict(a=3))
+
+ def test_send_rabbit_notification(self):
+ self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+ 'nova.notifier.rabbit_notifier')
+ self.mock_cast = False
+
+ def mock_cast(cls, *args):
+ self.mock_cast = True
+
+ class Mock(object):
+ pass
+
+ self.stubs.Set(nova.rpc, 'cast', mock_cast)
+ notify('publisher_id', 'event_type',
+ nova.notifier.api.WARN, dict(a=3))
+
+ self.assertEqual(self.mock_cast, True)
+
+ def test_invalid_priority(self):
+ def mock_cast(cls, *args):
+ pass
+
+ class Mock(object):
+ pass
+
+ self.stubs.Set(nova.rpc, 'cast', mock_cast)
+ self.assertRaises(nova.notifier.api.BadPriorityException,
+ notify, 'publisher_id',
+ 'event_type', 'not a priority', dict(a=3))
+
+ def test_rabbit_priority_queue(self):
+ self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+ 'nova.notifier.rabbit_notifier')
+ self.stubs.Set(nova.flags.FLAGS, 'notification_topic',
+ 'testnotify')
+
+ self.test_topic = None
+
+ def mock_cast(context, topic, msg):
+ self.test_topic = topic
+
+ self.stubs.Set(nova.rpc, 'cast', mock_cast)
+ notify('publisher_id',
+ 'event_type', 'DEBUG', dict(a=3))
+ self.assertEqual(self.test_topic, 'testnotify.debug')
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 39a123158..916fca55e 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -96,28 +96,121 @@ class QuotaTestCase(test.TestCase):
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 2)
- db.quota_create(self.context, {'project_id': self.project.id,
- 'instances': 10})
+ db.quota_create(self.context, self.project.id, 'instances', 10)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 4)
- db.quota_update(self.context, self.project.id, {'cores': 100})
+ db.quota_create(self.context, self.project.id, 'cores', 100)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 10)
+ db.quota_create(self.context, self.project.id, 'ram', 3 * 2048)
+ num_instances = quota.allowed_instances(self.context, 100,
+ self._get_instance_type('m1.small'))
+ self.assertEqual(num_instances, 3)
# metadata_items
too_many_items = FLAGS.quota_metadata_items + 1000
num_metadata_items = quota.allowed_metadata_items(self.context,
too_many_items)
self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
- db.quota_update(self.context, self.project.id, {'metadata_items': 5})
+ db.quota_create(self.context, self.project.id, 'metadata_items', 5)
num_metadata_items = quota.allowed_metadata_items(self.context,
too_many_items)
self.assertEqual(num_metadata_items, 5)
# Cleanup
- db.quota_destroy(self.context, self.project.id)
+ db.quota_destroy_all_by_project(self.context, self.project.id)
+
+ def test_unlimited_instances(self):
+ FLAGS.quota_instances = 2
+ FLAGS.quota_ram = -1
+ FLAGS.quota_cores = -1
+ instance_type = self._get_instance_type('m1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 2)
+ db.quota_create(self.context, self.project.id, 'instances', None)
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 100)
+ num_instances = quota.allowed_instances(self.context, 101,
+ instance_type)
+ self.assertEqual(num_instances, 101)
+
+ def test_unlimited_ram(self):
+ FLAGS.quota_instances = -1
+ FLAGS.quota_ram = 2 * 2048
+ FLAGS.quota_cores = -1
+ instance_type = self._get_instance_type('m1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 2)
+ db.quota_create(self.context, self.project.id, 'ram', None)
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 100)
+ num_instances = quota.allowed_instances(self.context, 101,
+ instance_type)
+ self.assertEqual(num_instances, 101)
+
+ def test_unlimited_cores(self):
+ FLAGS.quota_instances = -1
+ FLAGS.quota_ram = -1
+ FLAGS.quota_cores = 2
+ instance_type = self._get_instance_type('m1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 2)
+ db.quota_create(self.context, self.project.id, 'cores', None)
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 100)
+ num_instances = quota.allowed_instances(self.context, 101,
+ instance_type)
+ self.assertEqual(num_instances, 101)
+
+ def test_unlimited_volumes(self):
+ FLAGS.quota_volumes = 10
+ FLAGS.quota_gigabytes = -1
+ volumes = quota.allowed_volumes(self.context, 100, 1)
+ self.assertEqual(volumes, 10)
+ db.quota_create(self.context, self.project.id, 'volumes', None)
+ volumes = quota.allowed_volumes(self.context, 100, 1)
+ self.assertEqual(volumes, 100)
+ volumes = quota.allowed_volumes(self.context, 101, 1)
+ self.assertEqual(volumes, 101)
+
+ def test_unlimited_gigabytes(self):
+ FLAGS.quota_volumes = -1
+ FLAGS.quota_gigabytes = 10
+ volumes = quota.allowed_volumes(self.context, 100, 1)
+ self.assertEqual(volumes, 10)
+ db.quota_create(self.context, self.project.id, 'gigabytes', None)
+ volumes = quota.allowed_volumes(self.context, 100, 1)
+ self.assertEqual(volumes, 100)
+ volumes = quota.allowed_volumes(self.context, 101, 1)
+ self.assertEqual(volumes, 101)
+
+ def test_unlimited_floating_ips(self):
+ FLAGS.quota_floating_ips = 10
+ floating_ips = quota.allowed_floating_ips(self.context, 100)
+ self.assertEqual(floating_ips, 10)
+ db.quota_create(self.context, self.project.id, 'floating_ips', None)
+ floating_ips = quota.allowed_floating_ips(self.context, 100)
+ self.assertEqual(floating_ips, 100)
+ floating_ips = quota.allowed_floating_ips(self.context, 101)
+ self.assertEqual(floating_ips, 101)
+
+ def test_unlimited_metadata_items(self):
+ FLAGS.quota_metadata_items = 10
+ items = quota.allowed_metadata_items(self.context, 100)
+ self.assertEqual(items, 10)
+ db.quota_create(self.context, self.project.id, 'metadata_items', None)
+ items = quota.allowed_metadata_items(self.context, 100)
+ self.assertEqual(items, 100)
+ items = quota.allowed_metadata_items(self.context, 101)
+ self.assertEqual(items, 101)
def test_too_many_instances(self):
instance_ids = []
@@ -203,10 +296,47 @@ class QuotaTestCase(test.TestCase):
image_id='fake',
metadata=metadata)
- def test_allowed_injected_files(self):
- self.assertEqual(
- quota.allowed_injected_files(self.context),
- FLAGS.quota_max_injected_files)
+ def test_default_allowed_injected_files(self):
+ FLAGS.quota_max_injected_files = 55
+ self.assertEqual(quota.allowed_injected_files(self.context, 100), 55)
+
+ def test_overridden_allowed_injected_files(self):
+ FLAGS.quota_max_injected_files = 5
+ db.quota_create(self.context, self.project.id, 'injected_files', 77)
+ self.assertEqual(quota.allowed_injected_files(self.context, 100), 77)
+
+ def test_unlimited_default_allowed_injected_files(self):
+ FLAGS.quota_max_injected_files = -1
+ self.assertEqual(quota.allowed_injected_files(self.context, 100), 100)
+
+ def test_unlimited_db_allowed_injected_files(self):
+ FLAGS.quota_max_injected_files = 5
+ db.quota_create(self.context, self.project.id, 'injected_files', None)
+ self.assertEqual(quota.allowed_injected_files(self.context, 100), 100)
+
+ def test_default_allowed_injected_file_content_bytes(self):
+ FLAGS.quota_max_injected_file_content_bytes = 12345
+ limit = quota.allowed_injected_file_content_bytes(self.context, 23456)
+ self.assertEqual(limit, 12345)
+
+ def test_overridden_allowed_injected_file_content_bytes(self):
+ FLAGS.quota_max_injected_file_content_bytes = 12345
+ db.quota_create(self.context, self.project.id,
+ 'injected_file_content_bytes', 5678)
+ limit = quota.allowed_injected_file_content_bytes(self.context, 23456)
+ self.assertEqual(limit, 5678)
+
+ def test_unlimited_default_allowed_injected_file_content_bytes(self):
+ FLAGS.quota_max_injected_file_content_bytes = -1
+ limit = quota.allowed_injected_file_content_bytes(self.context, 23456)
+ self.assertEqual(limit, 23456)
+
+ def test_unlimited_db_allowed_injected_file_content_bytes(self):
+ FLAGS.quota_max_injected_file_content_bytes = 12345
+ db.quota_create(self.context, self.project.id,
+ 'injected_file_content_bytes', None)
+ limit = quota.allowed_injected_file_content_bytes(self.context, 23456)
+ self.assertEqual(limit, 23456)
def _create_with_injected_files(self, files):
api = compute.API(image_service=self.StubImageService())
@@ -233,11 +363,6 @@ class QuotaTestCase(test.TestCase):
self.assertRaises(quota.QuotaError,
self._create_with_injected_files, files)
- def test_allowed_injected_file_content_bytes(self):
- self.assertEqual(
- quota.allowed_injected_file_content_bytes(self.context),
- FLAGS.quota_max_injected_file_content_bytes)
-
def test_max_injected_file_content_bytes(self):
max = FLAGS.quota_max_injected_file_content_bytes
content = ''.join(['a' for i in xrange(max)])
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 968ef9d6c..54b3f80fb 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -912,7 +912,8 @@ class SimpleDriverTestCase(test.TestCase):
class FakeZone(object):
- def __init__(self, api_url, username, password):
+ def __init__(self, id, api_url, username, password):
+ self.id = id
self.api_url = api_url
self.username = username
self.password = password
@@ -920,7 +921,7 @@ class FakeZone(object):
def zone_get_all(context):
return [
- FakeZone('http://example.com', 'bob', 'xxx'),
+ FakeZone(1, 'http://example.com', 'bob', 'xxx'),
]
@@ -1037,7 +1038,7 @@ class FakeNovaClient(object):
class DynamicNovaClientTest(test.TestCase):
def test_issue_novaclient_command_found(self):
- zone = FakeZone('http://example.com', 'bob', 'xxx')
+ zone = FakeZone(1, 'http://example.com', 'bob', 'xxx')
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
zone, "servers", "get", 100).a, 10)
@@ -1051,7 +1052,7 @@ class DynamicNovaClientTest(test.TestCase):
zone, "servers", "pause", 100), None)
def test_issue_novaclient_command_not_found(self):
- zone = FakeZone('http://example.com', 'bob', 'xxx')
+ zone = FakeZone(1, 'http://example.com', 'bob', 'xxx')
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
zone, "servers", "get", 100), None)
@@ -1063,3 +1064,55 @@ class DynamicNovaClientTest(test.TestCase):
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
zone, "servers", "any", "name"), None)
+
+
+class FakeZonesProxy(object):
+ def do_something(*args, **kwargs):
+ return 42
+
+ def raises_exception(*args, **kwargs):
+ raise Exception('testing')
+
+
+class FakeNovaClientOpenStack(object):
+ def __init__(self, *args, **kwargs):
+ self.zones = FakeZonesProxy()
+
+ def authenticate(self):
+ pass
+
+
+class CallZoneMethodTest(test.TestCase):
+ def setUp(self):
+ super(CallZoneMethodTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(db, 'zone_get_all', zone_get_all)
+ self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(CallZoneMethodTest, self).tearDown()
+
+ def test_call_zone_method(self):
+ context = {}
+ method = 'do_something'
+ results = api.call_zone_method(context, method)
+ expected = [(1, 42)]
+ self.assertEqual(expected, results)
+
+ def test_call_zone_method_not_present(self):
+ context = {}
+ method = 'not_present'
+ self.assertRaises(AttributeError, api.call_zone_method,
+ context, method)
+
+ def test_call_zone_method_generates_exception(self):
+ context = {}
+ method = 'raises_exception'
+ results = api.call_zone_method(context, method)
+
+ # FIXME(sirp): for now the _error_trap code is catching errors and
+ # converting them to a ("ERROR", "string") tuples. The code (and this
+ # test) should eventually handle real exceptions.
+ expected = [(1, ('ERROR', 'testing'))]
+ self.assertEqual(expected, results)
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index e7b5c826e..8f7e83c3e 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -17,9 +17,9 @@
import os
import tempfile
+from nova import exception
from nova import test
from nova import utils
-from nova import exception
class ExecuteTestCase(test.TestCase):
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 375480a2e..be1e35697 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -16,7 +16,9 @@
"""Test suite for XenAPI."""
+import eventlet
import functools
+import json
import os
import re
import stubout
@@ -197,6 +199,28 @@ class XenAPIVMTestCase(test.TestCase):
self.context = context.RequestContext('fake', 'fake', False)
self.conn = xenapi_conn.get_connection(False)
+ def test_parallel_builds(self):
+ stubs.stubout_loopingcall_delay(self.stubs)
+
+ def _do_build(id, proj, user, *args):
+ values = {
+ 'id': id,
+ 'project_id': proj,
+ 'user_id': user,
+ 'image_id': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'instance_type_id': '3', # m1.large
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'}
+ instance = db.instance_create(self.context, values)
+ self.conn.spawn(instance)
+
+ gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id)
+ gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id)
+ gt1.wait()
+ gt2.wait()
+
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
@@ -665,3 +689,52 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
+
+
+class FakeXenApi(object):
+ """Fake XenApi for testing HostState."""
+
+ class FakeSR(object):
+ def get_record(self, ref):
+ return {'virtual_allocation': 10000,
+ 'physical_utilisation': 20000}
+
+ SR = FakeSR()
+
+
+class FakeSession(object):
+ """Fake Session class for HostState testing."""
+
+ def async_call_plugin(self, *args):
+ return None
+
+ def wait_for_task(self, *args):
+ vm = {'total': 10,
+ 'overhead': 20,
+ 'free': 30,
+ 'free-computed': 40}
+ return json.dumps({'host_memory': vm})
+
+ def get_xenapi(self):
+ return FakeXenApi()
+
+
+class HostStateTestCase(test.TestCase):
+ """Tests HostState, which holds metrics from XenServer that get
+ reported back to the Schedulers."""
+
+ def _fake_safe_find_sr(self, session):
+ """None SR ref since we're ignoring it in FakeSR."""
+ return None
+
+ def test_host_state(self):
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(vm_utils, 'safe_find_sr', self._fake_safe_find_sr)
+ host_state = xenapi_conn.HostState(FakeSession())
+ stats = host_state._stats
+ self.assertEquals(stats['disk_total'], 10000)
+ self.assertEquals(stats['disk_used'], 20000)
+ self.assertEquals(stats['host_memory_total'], 10)
+ self.assertEquals(stats['host_memory_overhead'], 20)
+ self.assertEquals(stats['host_memory_free'], 30)
+ self.assertEquals(stats['host_memory_free_computed'], 40)
diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py
new file mode 100644
index 000000000..fdcde34c9
--- /dev/null
+++ b/nova/tests/test_zone_aware_scheduler.py
@@ -0,0 +1,119 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Zone Aware Scheduler.
+"""
+
+from nova import test
+from nova.scheduler import driver
+from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import zone_manager
+
+
+class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+ def filter_hosts(self, num, specs):
+ # NOTE(sirp): this is returning [(hostname, services)]
+ return self.zone_manager.service_states.items()
+
+ def weigh_hosts(self, num, specs, hosts):
+ fake_weight = 99
+ weighted = []
+ for hostname, caps in hosts:
+ weighted.append(dict(weight=fake_weight, name=hostname))
+ return weighted
+
+
+class FakeZoneManager(zone_manager.ZoneManager):
+ def __init__(self):
+ self.service_states = {
+ 'host1': {
+ 'compute': {'ram': 1000}
+ },
+ 'host2': {
+ 'compute': {'ram': 2000}
+ },
+ 'host3': {
+ 'compute': {'ram': 3000}
+ }
+ }
+
+
+class FakeEmptyZoneManager(zone_manager.ZoneManager):
+ def __init__(self):
+ self.service_states = {}
+
+
+def fake_empty_call_zone_method(context, method, specs):
+ return []
+
+
+def fake_call_zone_method(context, method, specs):
+ return [
+ ('zone1', [
+ dict(weight=1, blob='AAAAAAA'),
+ dict(weight=111, blob='BBBBBBB'),
+ dict(weight=112, blob='CCCCCCC'),
+ dict(weight=113, blob='DDDDDDD'),
+ ]),
+ ('zone2', [
+ dict(weight=120, blob='EEEEEEE'),
+ dict(weight=2, blob='FFFFFFF'),
+ dict(weight=122, blob='GGGGGGG'),
+ dict(weight=123, blob='HHHHHHH'),
+ ]),
+ ('zone3', [
+ dict(weight=130, blob='IIIIIII'),
+ dict(weight=131, blob='JJJJJJJ'),
+ dict(weight=132, blob='KKKKKKK'),
+ dict(weight=3, blob='LLLLLLL'),
+ ]),
+ ]
+
+
+class ZoneAwareSchedulerTestCase(test.TestCase):
+ """Test case for Zone Aware Scheduler."""
+
+ def test_zone_aware_scheduler(self):
+ """
+ Create a nested set of FakeZones and ensure that a select call returns
+ the appropriate build plan.
+ """
+ sched = FakeZoneAwareScheduler()
+ self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+
+ zm = FakeZoneManager()
+ sched.set_zone_manager(zm)
+
+ fake_context = {}
+ build_plan = sched.select(fake_context, {})
+
+ self.assertEqual(15, len(build_plan))
+
+ hostnames = [plan_item['name']
+ for plan_item in build_plan if 'name' in plan_item]
+ self.assertEqual(3, len(hostnames))
+
+ def test_empty_zone_aware_scheduler(self):
+ """
+ Ensure empty hosts & child_zones result in a NoValidHost exception.
+ """
+ sched = FakeZoneAwareScheduler()
+ self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
+
+ zm = FakeEmptyZoneManager()
+ sched.set_zone_manager(zm)
+
+ fake_context = {}
+ self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {})
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 205f6c902..4833ccb07 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -16,6 +16,7 @@
"""Stubouts, mocks and fixtures for the test suite"""
+import eventlet
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
@@ -28,29 +29,6 @@ def stubout_instance_snapshot(stubs):
@classmethod
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
- # Stubout wait_for_task
- def fake_wait_for_task(self, task, id):
- class FakeEvent:
-
- def send(self, value):
- self.rv = value
-
- def wait(self):
- return self.rv
-
- done = FakeEvent()
- self._poll_task(id, task, done)
- rv = done.wait()
- return rv
-
- def fake_loop(self):
- pass
-
- stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
- fake_wait_for_task)
-
- stubs.Set(xenapi_conn.XenAPISession, '_stop_loop', fake_loop)
-
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
@@ -63,11 +41,6 @@ def stubout_instance_snapshot(stubs):
stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
- def fake_parse_xmlrpc_value(val):
- return val
-
- stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
-
def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
from nova.virt.xenapi.fake import create_vdi
@@ -144,6 +117,16 @@ def stubout_loopingcall_start(stubs):
stubs.Set(utils.LoopingCall, 'start', fake_start)
+def stubout_loopingcall_delay(stubs):
+ def fake_start(self, interval, now=True):
+ self._running = True
+ eventlet.sleep(1)
+ self.f(*self.args, **self.kw)
+ # This would fail before parallel xenapi calls were fixed
+ assert self._running == False
+ stubs.Set(utils.LoopingCall, 'start', fake_start)
+
+
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
diff --git a/nova/utils.py b/nova/utils.py
index 80bf1197f..361fc9873 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -232,9 +232,12 @@ def default_flagfile(filename='nova.conf'):
# turn relative filename into an absolute path
script_dir = os.path.dirname(inspect.stack()[-1][1])
filename = os.path.abspath(os.path.join(script_dir, filename))
- if os.path.exists(filename):
- flagfile = ['--flagfile=%s' % filename]
- sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
+ if not os.path.exists(filename):
+ filename = "./nova.conf"
+ if not os.path.exists(filename):
+ filename = '/etc/nova/nova.conf'
+ flagfile = ['--flagfile=%s' % filename]
+ sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
def debug(arg):
@@ -303,26 +306,6 @@ def get_my_linklocal(interface):
" :%(ex)s") % locals())
-def to_global_ipv6(prefix, mac):
- try:
- mac64 = netaddr.EUI(mac).eui64().words
- int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
- mac64_addr = netaddr.IPAddress(int_addr)
- maskIP = netaddr.IPNetwork(prefix).ip
- return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
- format()
- except TypeError:
- raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
-
-
-def to_mac(ipv6_address):
- address = netaddr.IPAddress(ipv6_address)
- mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff')
- mask2 = netaddr.IPAddress('::0200:0:0:0')
- mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words
- return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]])
-
-
def utcnow():
"""Overridable version of datetime.datetime.utcnow."""
if utcnow.override_time:
@@ -459,6 +442,8 @@ class LoopingCall(object):
try:
while self._running:
self.f(*self.args, **self.kw)
+ if not self._running:
+ break
greenthread.sleep(interval)
except LoopingCallDone, e:
self.stop()
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 99a8849f1..aeec17c98 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -27,9 +27,9 @@ from nova import utils
from nova.virt import driver
from nova.virt import fake
from nova.virt import hyperv
-from nova.virt import libvirt_conn
from nova.virt import vmwareapi_conn
from nova.virt import xenapi_conn
+from nova.virt.libvirt import connection as libvirt_conn
LOG = logging.getLogger("nova.virt.connection")
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index ddea1a1f7..f8aea1f34 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -81,34 +81,36 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
else:
mapped_device = device
- # We can only loopback mount raw images. If the device isn't there,
- # it's normally because it's a .vmdk or a .vdi etc
- if not os.path.exists(mapped_device):
- raise exception.Error('Mapped device was not found (we can'
- ' only inject raw disk images): %s' %
- mapped_device)
-
- # Configure ext2fs so that it doesn't auto-check every N boots
- out, err = utils.execute('sudo', 'tune2fs',
- '-c', 0, '-i', 0, mapped_device)
-
- tmpdir = tempfile.mkdtemp()
try:
- # mount loopback to dir
- out, err = utils.execute(
- 'sudo', 'mount', mapped_device, tmpdir)
- if err:
- raise exception.Error(_('Failed to mount filesystem: %s')
- % err)
-
+ # We can only loopback mount raw images. If the device isn't there,
+ # it's normally because it's a .vmdk or a .vdi etc
+ if not os.path.exists(mapped_device):
+ raise exception.Error('Mapped device was not found (we can'
+ ' only inject raw disk images): %s' %
+ mapped_device)
+
+ # Configure ext2fs so that it doesn't auto-check every N boots
+ out, err = utils.execute('sudo', 'tune2fs',
+ '-c', 0, '-i', 0, mapped_device)
+
+ tmpdir = tempfile.mkdtemp()
try:
- inject_data_into_fs(tmpdir, key, net, utils.execute)
+ # mount loopback to dir
+ out, err = utils.execute(
+ 'sudo', 'mount', mapped_device, tmpdir)
+ if err:
+ raise exception.Error(_('Failed to mount filesystem: %s')
+ % err)
+
+ try:
+ inject_data_into_fs(tmpdir, key, net, utils.execute)
+ finally:
+ # unmount device
+ utils.execute('sudo', 'umount', mapped_device)
finally:
- # unmount device
- utils.execute('sudo', 'umount', mapped_device)
+ # remove temporary directory
+ utils.execute('rmdir', tmpdir)
finally:
- # remove temporary directory
- utils.execute('rmdir', tmpdir)
if not partition is None:
# remove partitions
utils.execute('sudo', 'kpartx', '-d', device)
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 9026e737e..1142e97a4 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -486,3 +486,11 @@ class HyperVConnection(driver.ComputeDriver):
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return
+
+ def update_host_status(self):
+ """See xenapi_conn.py implementation."""
+ pass
+
+ def get_host_stats(self, refresh=False):
+ """See xenapi_conn.py implementation."""
+ pass
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 2e3f2ee4d..02c898fda 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -21,19 +21,10 @@
Handling of VM disk images.
"""
-import os.path
-import shutil
-import sys
-import time
-import urllib2
-import urlparse
-
from nova import context
from nova import flags
from nova import log as logging
from nova import utils
-from nova.auth import manager
-from nova.auth import signer
FLAGS = flags.FLAGS
@@ -52,66 +43,6 @@ def fetch(image_id, path, _user, _project):
return metadata
-# NOTE(vish): The methods below should be unnecessary, but I'm leaving
-# them in case the glance client does not work on windows.
-def _fetch_image_no_curl(url, path, headers):
- request = urllib2.Request(url)
- for (k, v) in headers.iteritems():
- request.add_header(k, v)
-
- def urlretrieve(urlfile, fpath):
- chunk = 1 * 1024 * 1024
- f = open(fpath, "wb")
- while 1:
- data = urlfile.read(chunk)
- if not data:
- break
- f.write(data)
-
- urlopened = urllib2.urlopen(request)
- urlretrieve(urlopened, path)
- LOG.debug(_("Finished retreving %(url)s -- placed in %(path)s") % locals())
-
-
-def _fetch_s3_image(image, path, user, project):
- url = image_url(image)
-
- # This should probably move somewhere else, like e.g. a download_as
- # method on User objects and at the same time get rewritten to use
- # a web client.
- headers = {}
- headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
-
- (_, _, url_path, _, _, _) = urlparse.urlparse(url)
- access = manager.AuthManager().get_access_key(user, project)
- signature = signer.Signer(user.secret.encode()).s3_authorization(headers,
- 'GET',
- url_path)
- headers['Authorization'] = 'AWS %s:%s' % (access, signature)
-
- if sys.platform.startswith('win'):
- return _fetch_image_no_curl(url, path, headers)
- else:
- cmd = ['/usr/bin/curl', '--fail', '--silent', url]
- for (k, v) in headers.iteritems():
- cmd += ['-H', '\'%s: %s\'' % (k, v)]
-
- cmd += ['-o', path]
- return utils.execute(*cmd)
-
-
-def _fetch_local_image(image, path, user, project):
- source = _image_path(os.path.join(image, 'image'))
- if sys.platform.startswith('win'):
- return shutil.copy(source, path)
- else:
- return utils.execute('cp', source, path)
-
-
-def _image_path(path):
- return os.path.join(FLAGS.images_path, path)
-
-
# TODO(vish): xenapi should use the glance client code directly instead
# of retrieving the image using this method.
def image_url(image):
diff --git a/nova/virt/libvirt/__init__.py b/nova/virt/libvirt/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/virt/libvirt/__init__.py
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt/connection.py
index 92d580314..c814f658b 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt/connection.py
@@ -66,20 +66,23 @@ from nova.compute import power_state
from nova.virt import disk
from nova.virt import driver
from nova.virt import images
+from nova.virt.libvirt import netutils
+
libvirt = None
libxml2 = None
Template = None
+
LOG = logging.getLogger('nova.virt.libvirt_conn')
+
FLAGS = flags.FLAGS
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
-
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
@@ -101,7 +104,7 @@ flags.DEFINE_string('ajaxterm_portrange',
'10000-12000',
'Range of ports that ajaxterm should randomly try to bind')
flags.DEFINE_string('firewall_driver',
- 'nova.virt.libvirt_conn.IptablesFirewallDriver',
+ 'nova.virt.libvirt.firewall.IptablesFirewallDriver',
'Firewall driver (defaults to iptables)')
flags.DEFINE_string('cpuinfo_xml_template',
utils.abspath('virt/cpuinfo.xml.template'),
@@ -143,69 +146,6 @@ def _late_load_cheetah():
Template = t.Template
-def _get_net_and_mask(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.netmask())
-
-
-def _get_net_and_prefixlen(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.prefixlen())
-
-
-def _get_ip_version(cidr):
- net = IPy.IP(cidr)
- return int(net.version())
-
-
-def _get_network_info(instance):
- # TODO(adiantum) If we will keep this function
- # we should cache network_info
- admin_context = context.get_admin_context()
-
- ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
- instance['id'])
- networks = db.network_get_all_by_instance(admin_context,
- instance['id'])
- flavor = db.instance_type_get_by_id(admin_context,
- instance['instance_type_id'])
- network_info = []
-
- for network in networks:
- network_ips = [ip for ip in ip_addresses
- if ip['network_id'] == network['id']]
-
- def ip_dict(ip):
- return {
- 'ip': ip['address'],
- 'netmask': network['netmask'],
- 'enabled': '1'}
-
- def ip6_dict():
- prefix = network['cidr_v6']
- mac = instance['mac_address']
- return {
- 'ip': utils.to_global_ipv6(prefix, mac),
- 'netmask': network['netmask_v6'],
- 'enabled': '1'}
-
- mapping = {
- 'label': network['label'],
- 'gateway': network['gateway'],
- 'broadcast': network['broadcast'],
- 'mac': instance['mac_address'],
- 'rxtx_cap': flavor['rxtx_cap'],
- 'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_ips]}
-
- if FLAGS.use_ipv6:
- mapping['ip6s'] = [ip6_dict()]
- mapping['gateway6'] = network['gateway_v6']
-
- network_info.append((network, mapping))
- return network_info
-
-
class LibvirtConnection(driver.ComputeDriver):
def __init__(self, read_only):
@@ -808,7 +748,7 @@ class LibvirtConnection(driver.ComputeDriver):
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
network_info=None):
if not network_info:
- network_info = _get_network_info(inst)
+ network_info = netutils.get_network_info(inst)
if not suffix:
suffix = ''
@@ -967,10 +907,10 @@ class LibvirtConnection(driver.ComputeDriver):
if FLAGS.allow_project_net_traffic:
template = "<parameter name=\"%s\"value=\"%s\" />\n"
- net, mask = _get_net_and_mask(network['cidr'])
+ net, mask = netutils.get_net_and_mask(network['cidr'])
values = [("PROJNET", net), ("PROJMASK", mask)]
if FLAGS.use_ipv6:
- net_v6, prefixlen_v6 = _get_net_and_prefixlen(
+ net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
network['cidr_v6'])
values.extend([("PROJNETV6", net_v6),
("PROJMASKV6", prefixlen_v6)])
@@ -997,7 +937,7 @@ class LibvirtConnection(driver.ComputeDriver):
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
- network_info = _get_network_info(instance)
+ network_info = netutils.get_network_info(instance)
nics = []
for (network, mapping) in network_info:
@@ -1585,597 +1525,10 @@ class LibvirtConnection(driver.ComputeDriver):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance_ref)
-
-class FirewallDriver(object):
- def prepare_instance_filter(self, instance, network_info=None):
- """Prepare filters for the instance.
-
- At this point, the instance isn't running yet."""
- raise NotImplementedError()
-
- def unfilter_instance(self, instance):
- """Stop filtering instance"""
- raise NotImplementedError()
-
- def apply_instance_filter(self, instance):
- """Apply instance filter.
-
- Once this method returns, the instance should be firewalled
- appropriately. This method should as far as possible be a
- no-op. It's vastly preferred to get everything set up in
- prepare_instance_filter.
- """
- raise NotImplementedError()
-
- def refresh_security_group_rules(self, security_group_id):
- """Refresh security group rules from data store
-
- Gets called when a rule has been added to or removed from
- the security group."""
- raise NotImplementedError()
-
- def refresh_security_group_members(self, security_group_id):
- """Refresh security group members from data store
-
- Gets called when an instance gets added to or removed from
- the security group."""
- raise NotImplementedError()
-
- def setup_basic_filtering(self, instance, network_info=None):
- """Create rules to block spoofing and allow dhcp.
-
- This gets called when spawning an instance, before
- :method:`prepare_instance_filter`.
-
- """
- raise NotImplementedError()
-
- def instance_filter_exists(self, instance):
- """Check nova-instance-instance-xxx exists"""
- raise NotImplementedError()
-
-
-class NWFilterFirewall(FirewallDriver):
- """
- This class implements a network filtering mechanism versatile
- enough for EC2 style Security Group filtering by leveraging
- libvirt's nwfilter.
-
- First, all instances get a filter ("nova-base-filter") applied.
- This filter provides some basic security such as protection against
- MAC spoofing, IP spoofing, and ARP spoofing.
-
- This filter drops all incoming ipv4 and ipv6 connections.
- Outgoing connections are never blocked.
-
- Second, every security group maps to a nwfilter filter(*).
- NWFilters can be updated at runtime and changes are applied
- immediately, so changes to security groups can be applied at
- runtime (as mandated by the spec).
-
- Security group rules are named "nova-secgroup-<id>" where <id>
- is the internal id of the security group. They're applied only on
- hosts that have instances in the security group in question.
-
- Updates to security groups are done by updating the data model
- (in response to API calls) followed by a request sent to all
- the nodes with instances in the security group to refresh the
- security group.
-
- Each instance has its own NWFilter, which references the above
- mentioned security group NWFilters. This was done because
- interfaces can only reference one filter while filters can
- reference multiple other filters. This has the added benefit of
- actually being able to add and remove security groups from an
- instance at run time. This functionality is not exposed anywhere,
- though.
-
- Outstanding questions:
-
- The name is unique, so would there be any good reason to sync
- the uuid across the nodes (by assigning it from the datamodel)?
-
-
- (*) This sentence brought to you by the redundancy department of
- redundancy.
-
- """
-
- def __init__(self, get_connection, **kwargs):
- self._libvirt_get_connection = get_connection
- self.static_filters_configured = False
- self.handle_security_groups = False
-
- def apply_instance_filter(self, instance):
- """No-op. Everything is done in prepare_instance_filter"""
+ def update_host_status(self):
+ """See xenapi_conn.py implementation."""
pass
- def _get_connection(self):
- return self._libvirt_get_connection()
- _conn = property(_get_connection)
-
- def nova_dhcp_filter(self):
- """The standard allow-dhcp-server filter is an <ip> one, so it uses
- ebtables to allow traffic through. Without a corresponding rule in
- iptables, it'll get blocked anyway."""
-
- return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
- <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
- <rule action='accept' direction='out'
- priority='100'>
- <udp srcipaddr='0.0.0.0'
- dstipaddr='255.255.255.255'
- srcportstart='68'
- dstportstart='67'/>
- </rule>
- <rule action='accept' direction='in'
- priority='100'>
- <udp srcipaddr='$DHCPSERVER'
- srcportstart='67'
- dstportstart='68'/>
- </rule>
- </filter>'''
-
- def nova_ra_filter(self):
- return '''<filter name='nova-allow-ra-server' chain='root'>
- <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
- <rule action='accept' direction='inout'
- priority='100'>
- <icmpv6 srcipaddr='$RASERVER'/>
- </rule>
- </filter>'''
-
- def setup_basic_filtering(self, instance, network_info=None):
- """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
- logging.info('called setup_basic_filtering in nwfilter')
-
- if not network_info:
- network_info = _get_network_info(instance)
-
- if self.handle_security_groups:
- # No point in setting up a filter set that we'll be overriding
- # anyway.
- return
-
- logging.info('ensuring static filters')
- self._ensure_static_filters()
-
- if instance['image_id'] == str(FLAGS.vpn_image_id):
- base_filter = 'nova-vpn'
- else:
- base_filter = 'nova-base'
-
- for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- self._define_filter(self._filter_container(instance_filter_name,
- [base_filter]))
-
- def _ensure_static_filters(self):
- if self.static_filters_configured:
- return
-
- self._define_filter(self._filter_container('nova-base',
- ['no-mac-spoofing',
- 'no-ip-spoofing',
- 'no-arp-spoofing',
- 'allow-dhcp-server']))
- self._define_filter(self._filter_container('nova-vpn',
- ['allow-dhcp-server']))
- self._define_filter(self.nova_base_ipv4_filter)
- self._define_filter(self.nova_base_ipv6_filter)
- self._define_filter(self.nova_dhcp_filter)
- self._define_filter(self.nova_ra_filter)
- if FLAGS.allow_project_net_traffic:
- self._define_filter(self.nova_project_filter)
- if FLAGS.use_ipv6:
- self._define_filter(self.nova_project_filter_v6)
-
- self.static_filters_configured = True
-
- def _filter_container(self, name, filters):
- xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
- name,
- ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
- return xml
-
- def nova_base_ipv4_filter(self):
- retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
- for protocol in ['tcp', 'udp', 'icmp']:
- for direction, action, priority in [('out', 'accept', 399),
- ('in', 'drop', 400)]:
- retval += """<rule action='%s' direction='%s' priority='%d'>
- <%s />
- </rule>""" % (action, direction,
- priority, protocol)
- retval += '</filter>'
- return retval
-
- def nova_base_ipv6_filter(self):
- retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
- for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
- for direction, action, priority in [('out', 'accept', 399),
- ('in', 'drop', 400)]:
- retval += """<rule action='%s' direction='%s' priority='%d'>
- <%s />
- </rule>""" % (action, direction,
- priority, protocol)
- retval += '</filter>'
- return retval
-
- def nova_project_filter(self):
- retval = "<filter name='nova-project' chain='ipv4'>"
- for protocol in ['tcp', 'udp', 'icmp']:
- retval += """<rule action='accept' direction='in' priority='200'>
- <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
- </rule>""" % protocol
- retval += '</filter>'
- return retval
-
- def nova_project_filter_v6(self):
- retval = "<filter name='nova-project-v6' chain='ipv6'>"
- for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
- retval += """<rule action='accept' direction='inout'
- priority='200'>
- <%s srcipaddr='$PROJNETV6'
- srcipmask='$PROJMASKV6' />
- </rule>""" % (protocol)
- retval += '</filter>'
- return retval
-
- def _define_filter(self, xml):
- if callable(xml):
- xml = xml()
- # execute in a native thread and block current greenthread until done
- tpool.execute(self._conn.nwfilterDefineXML, xml)
-
- def unfilter_instance(self, instance):
- # Nothing to do
- pass
-
- def prepare_instance_filter(self, instance, network_info=None):
- """
- Creates an NWFilter for the given instance. In the process,
- it makes sure the filters for the security groups as well as
- the base filter are all in place.
- """
- if not network_info:
- network_info = _get_network_info(instance)
-
- ctxt = context.get_admin_context()
-
- instance_secgroup_filter_name = \
- '%s-secgroup' % (self._instance_filter_name(instance))
- #% (instance_filter_name,)
-
- instance_secgroup_filter_children = ['nova-base-ipv4',
- 'nova-base-ipv6',
- 'nova-allow-dhcp-server']
-
- if FLAGS.use_ipv6:
- networks = [network for (network, _m) in network_info if
- network['gateway_v6']]
-
- if networks:
- instance_secgroup_filter_children.\
- append('nova-allow-ra-server')
-
- for security_group in \
- db.security_group_get_by_instance(ctxt, instance['id']):
-
- self.refresh_security_group_rules(security_group['id'])
-
- instance_secgroup_filter_children.append('nova-secgroup-%s' %
- security_group['id'])
-
- self._define_filter(
- self._filter_container(instance_secgroup_filter_name,
- instance_secgroup_filter_children))
-
- network_filters = self.\
- _create_network_filters(instance, network_info,
- instance_secgroup_filter_name)
-
- for (name, children) in network_filters:
- self._define_filters(name, children)
-
- def _create_network_filters(self, instance, network_info,
- instance_secgroup_filter_name):
- if instance['image_id'] == str(FLAGS.vpn_image_id):
- base_filter = 'nova-vpn'
- else:
- base_filter = 'nova-base'
-
- result = []
- for (_n, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- instance_filter_children = [base_filter,
- instance_secgroup_filter_name]
-
- if FLAGS.allow_project_net_traffic:
- instance_filter_children.append('nova-project')
- if FLAGS.use_ipv6:
- instance_filter_children.append('nova-project-v6')
-
- result.append((instance_filter_name, instance_filter_children))
-
- return result
-
- def _define_filters(self, filter_name, filter_children):
- self._define_filter(self._filter_container(filter_name,
- filter_children))
-
- def refresh_security_group_rules(self, security_group_id):
- return self._define_filter(
- self.security_group_to_nwfilter_xml(security_group_id))
-
- def security_group_to_nwfilter_xml(self, security_group_id):
- security_group = db.security_group_get(context.get_admin_context(),
- security_group_id)
- rule_xml = ""
- v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
- for rule in security_group.rules:
- rule_xml += "<rule action='accept' direction='in' priority='300'>"
- if rule.cidr:
- version = _get_ip_version(rule.cidr)
- if(FLAGS.use_ipv6 and version == 6):
- net, prefixlen = _get_net_and_prefixlen(rule.cidr)
- rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
- (v6protocol[rule.protocol], net, prefixlen)
- else:
- net, mask = _get_net_and_mask(rule.cidr)
- rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
- (rule.protocol, net, mask)
- if rule.protocol in ['tcp', 'udp']:
- rule_xml += "dstportstart='%s' dstportend='%s' " % \
- (rule.from_port, rule.to_port)
- elif rule.protocol == 'icmp':
- LOG.info('rule.protocol: %r, rule.from_port: %r, '
- 'rule.to_port: %r', rule.protocol,
- rule.from_port, rule.to_port)
- if rule.from_port != -1:
- rule_xml += "type='%s' " % rule.from_port
- if rule.to_port != -1:
- rule_xml += "code='%s' " % rule.to_port
-
- rule_xml += '/>\n'
- rule_xml += "</rule>\n"
- xml = "<filter name='nova-secgroup-%s' " % security_group_id
- if(FLAGS.use_ipv6):
- xml += "chain='root'>%s</filter>" % rule_xml
- else:
- xml += "chain='ipv4'>%s</filter>" % rule_xml
- return xml
-
- def _instance_filter_name(self, instance, nic_id=None):
- if not nic_id:
- return 'nova-instance-%s' % (instance['name'])
- return 'nova-instance-%s-%s' % (instance['name'], nic_id)
-
- def instance_filter_exists(self, instance):
- """Check nova-instance-instance-xxx exists"""
- network_info = _get_network_info(instance)
- for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- try:
- self._conn.nwfilterLookupByName(instance_filter_name)
- except libvirt.libvirtError:
- name = instance.name
- LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
- '%(name)s is not found.') % locals())
- return False
- return True
-
-
-class IptablesFirewallDriver(FirewallDriver):
- def __init__(self, execute=None, **kwargs):
- from nova.network import linux_net
- self.iptables = linux_net.iptables_manager
- self.instances = {}
- self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
-
- self.iptables.ipv4['filter'].add_chain('sg-fallback')
- self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
- self.iptables.ipv6['filter'].add_chain('sg-fallback')
- self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
-
- def setup_basic_filtering(self, instance, network_info=None):
- """Use NWFilter from libvirt for this."""
- if not network_info:
- network_info = _get_network_info(instance)
- return self.nwfilter.setup_basic_filtering(instance, network_info)
-
- def apply_instance_filter(self, instance):
- """No-op. Everything is done in prepare_instance_filter"""
- pass
-
- def unfilter_instance(self, instance):
- if self.instances.pop(instance['id'], None):
- self.remove_filters_for_instance(instance)
- self.iptables.apply()
- else:
- LOG.info(_('Attempted to unfilter instance %s which is not '
- 'filtered'), instance['id'])
-
- def prepare_instance_filter(self, instance, network_info=None):
- if not network_info:
- network_info = _get_network_info(instance)
- self.instances[instance['id']] = instance
- self.add_filters_for_instance(instance, network_info)
- self.iptables.apply()
-
- def _create_filter(self, ips, chain_name):
- return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
-
- def _filters_for_instance(self, chain_name, network_info):
- ips_v4 = [ip['ip'] for (_n, mapping) in network_info
- for ip in mapping['ips']]
- ipv4_rules = self._create_filter(ips_v4, chain_name)
-
- ipv6_rules = []
- if FLAGS.use_ipv6:
- ips_v6 = [ip['ip'] for (_n, mapping) in network_info
- for ip in mapping['ip6s']]
- ipv6_rules = self._create_filter(ips_v6, chain_name)
-
- return ipv4_rules, ipv6_rules
-
- def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
- for rule in ipv4_rules:
- self.iptables.ipv4['filter'].add_rule(chain_name, rule)
-
- if FLAGS.use_ipv6:
- for rule in ipv6_rules:
- self.iptables.ipv6['filter'].add_rule(chain_name, rule)
-
- def add_filters_for_instance(self, instance, network_info=None):
- chain_name = self._instance_chain_name(instance)
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].add_chain(chain_name)
- self.iptables.ipv4['filter'].add_chain(chain_name)
- ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
- network_info)
- self._add_filters('local', ipv4_rules, ipv6_rules)
- ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
- self._add_filters(chain_name, ipv4_rules, ipv6_rules)
-
- def remove_filters_for_instance(self, instance):
- chain_name = self._instance_chain_name(instance)
-
- self.iptables.ipv4['filter'].remove_chain(chain_name)
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].remove_chain(chain_name)
-
- def instance_rules(self, instance, network_info=None):
- if not network_info:
- network_info = _get_network_info(instance)
- ctxt = context.get_admin_context()
-
- ipv4_rules = []
- ipv6_rules = []
-
- # Always drop invalid packets
- ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
- ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
-
- # Allow established connections
- ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
- ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
-
- dhcp_servers = [network['gateway'] for (network, _m) in network_info]
-
- for dhcp_server in dhcp_servers:
- ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
- '-j ACCEPT' % (dhcp_server,))
-
- #Allow project network traffic
- if FLAGS.allow_project_net_traffic:
- cidrs = [network['cidr'] for (network, _m) in network_info]
- for cidr in cidrs:
- ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
-
- # We wrap these in FLAGS.use_ipv6 because they might cause
- # a DB lookup. The other ones are just list operations, so
- # they're not worth the clutter.
- if FLAGS.use_ipv6:
- # Allow RA responses
- gateways_v6 = [network['gateway_v6'] for (network, _) in
- network_info]
- for gateway_v6 in gateways_v6:
- ipv6_rules.append(
- '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
-
- #Allow project network traffic
- if FLAGS.allow_project_net_traffic:
- cidrv6s = [network['cidr_v6'] for (network, _m)
- in network_info]
-
- for cidrv6 in cidrv6s:
- ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
-
- security_groups = db.security_group_get_by_instance(ctxt,
- instance['id'])
-
- # then, security group chains and rules
- for security_group in security_groups:
- rules = db.security_group_rule_get_by_security_group(ctxt,
- security_group['id'])
-
- for rule in rules:
- logging.info('%r', rule)
-
- if not rule.cidr:
- # Eventually, a mechanism to grant access for security
- # groups will turn up here. It'll use ipsets.
- continue
-
- version = _get_ip_version(rule.cidr)
- if version == 4:
- rules = ipv4_rules
- else:
- rules = ipv6_rules
-
- protocol = rule.protocol
- if version == 6 and rule.protocol == 'icmp':
- protocol = 'icmpv6'
-
- args = ['-p', protocol, '-s', rule.cidr]
-
- if rule.protocol in ['udp', 'tcp']:
- if rule.from_port == rule.to_port:
- args += ['--dport', '%s' % (rule.from_port,)]
- else:
- args += ['-m', 'multiport',
- '--dports', '%s:%s' % (rule.from_port,
- rule.to_port)]
- elif rule.protocol == 'icmp':
- icmp_type = rule.from_port
- icmp_code = rule.to_port
-
- if icmp_type == -1:
- icmp_type_arg = None
- else:
- icmp_type_arg = '%s' % icmp_type
- if not icmp_code == -1:
- icmp_type_arg += '/%s' % icmp_code
-
- if icmp_type_arg:
- if version == 4:
- args += ['-m', 'icmp', '--icmp-type',
- icmp_type_arg]
- elif version == 6:
- args += ['-m', 'icmp6', '--icmpv6-type',
- icmp_type_arg]
-
- args += ['-j ACCEPT']
- rules += [' '.join(args)]
-
- ipv4_rules += ['-j $sg-fallback']
- ipv6_rules += ['-j $sg-fallback']
-
- return ipv4_rules, ipv6_rules
-
- def instance_filter_exists(self, instance):
- """Check nova-instance-instance-xxx exists"""
- return self.nwfilter.instance_filter_exists(instance)
-
- def refresh_security_group_members(self, security_group):
+ def get_host_stats(self, refresh=False):
+ """See xenapi_conn.py implementation."""
pass
-
- def refresh_security_group_rules(self, security_group):
- self.do_refresh_security_group_rules(security_group)
- self.iptables.apply()
-
- @utils.synchronized('iptables', external=True)
- def do_refresh_security_group_rules(self, security_group):
- for instance in self.instances.values():
- self.remove_filters_for_instance(instance)
- self.add_filters_for_instance(instance)
-
- def _security_group_chain_name(self, security_group_id):
- return 'nova-sg-%s' % (security_group_id,)
-
- def _instance_chain_name(self, instance):
- return 'inst-%s' % (instance['id'],)
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
new file mode 100644
index 000000000..7e00662cd
--- /dev/null
+++ b/nova/virt/libvirt/firewall.py
@@ -0,0 +1,642 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from eventlet import tpool
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.libvirt import netutils
+
+
+LOG = logging.getLogger("nova.virt.libvirt.firewall")
+FLAGS = flags.FLAGS
+
+
+try:
+ import libvirt
+except ImportError:
+ LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will "
+ "not work correctly."))
+
+
+class FirewallDriver(object):
+ def prepare_instance_filter(self, instance, network_info=None):
+ """Prepare filters for the instance.
+
+ At this point, the instance isn't running yet."""
+ raise NotImplementedError()
+
+ def unfilter_instance(self, instance):
+ """Stop filtering instance"""
+ raise NotImplementedError()
+
+ def apply_instance_filter(self, instance):
+ """Apply instance filter.
+
+ Once this method returns, the instance should be firewalled
+ appropriately. This method should as far as possible be a
+ no-op. It's vastly preferred to get everything set up in
+ prepare_instance_filter.
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self,
+ security_group_id,
+ network_info=None):
+ """Refresh security group rules from data store
+
+ Gets called when a rule has been added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+ def refresh_security_group_members(self, security_group_id):
+ """Refresh security group members from data store
+
+ Gets called when an instance gets added to or removed from
+ the security group."""
+ raise NotImplementedError()
+
+ def setup_basic_filtering(self, instance, network_info=None):
+ """Create rules to block spoofing and allow dhcp.
+
+ This gets called when spawning an instance, before
+ :method:`prepare_instance_filter`.
+
+ """
+ raise NotImplementedError()
+
+ def instance_filter_exists(self, instance):
+ """Check nova-instance-instance-xxx exists"""
+ raise NotImplementedError()
+
+
+class NWFilterFirewall(FirewallDriver):
+ """
+ This class implements a network filtering mechanism versatile
+ enough for EC2 style Security Group filtering by leveraging
+ libvirt's nwfilter.
+
+ First, all instances get a filter ("nova-base-filter") applied.
+ This filter provides some basic security such as protection against
+ MAC spoofing, IP spoofing, and ARP spoofing.
+
+ This filter drops all incoming ipv4 and ipv6 connections.
+ Outgoing connections are never blocked.
+
+ Second, every security group maps to a nwfilter filter(*).
+ NWFilters can be updated at runtime and changes are applied
+ immediately, so changes to security groups can be applied at
+ runtime (as mandated by the spec).
+
+ Security group rules are named "nova-secgroup-<id>" where <id>
+ is the internal id of the security group. They're applied only on
+ hosts that have instances in the security group in question.
+
+ Updates to security groups are done by updating the data model
+ (in response to API calls) followed by a request sent to all
+ the nodes with instances in the security group to refresh the
+ security group.
+
+ Each instance has its own NWFilter, which references the above
+ mentioned security group NWFilters. This was done because
+ interfaces can only reference one filter while filters can
+ reference multiple other filters. This has the added benefit of
+ actually being able to add and remove security groups from an
+ instance at run time. This functionality is not exposed anywhere,
+ though.
+
+ Outstanding questions:
+
+ The name is unique, so would there be any good reason to sync
+ the uuid across the nodes (by assigning it from the datamodel)?
+
+
+ (*) This sentence brought to you by the redundancy department of
+ redundancy.
+
+ """
+
+ def __init__(self, get_connection, **kwargs):
+ self._libvirt_get_connection = get_connection
+ self.static_filters_configured = False
+ self.handle_security_groups = False
+
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
+ def _get_connection(self):
+ return self._libvirt_get_connection()
+ _conn = property(_get_connection)
+
+ def nova_dhcp_filter(self):
+ """The standard allow-dhcp-server filter is an <ip> one, so it uses
+ ebtables to allow traffic through. Without a corresponding rule in
+ iptables, it'll get blocked anyway."""
+
+ return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+ <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+ <rule action='accept' direction='out'
+ priority='100'>
+ <udp srcipaddr='0.0.0.0'
+ dstipaddr='255.255.255.255'
+ srcportstart='68'
+ dstportstart='67'/>
+ </rule>
+ <rule action='accept' direction='in'
+ priority='100'>
+ <udp srcipaddr='$DHCPSERVER'
+ srcportstart='67'
+ dstportstart='68'/>
+ </rule>
+ </filter>'''
+
+ def nova_ra_filter(self):
+ return '''<filter name='nova-allow-ra-server' chain='root'>
+ <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
+ <rule action='accept' direction='inout'
+ priority='100'>
+ <icmpv6 srcipaddr='$RASERVER'/>
+ </rule>
+ </filter>'''
+
+ def setup_basic_filtering(self, instance, network_info=None):
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+ logging.info('called setup_basic_filtering in nwfilter')
+
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+
+ if self.handle_security_groups:
+ # No point in setting up a filter set that we'll be overriding
+ # anyway.
+ return
+
+ logging.info('ensuring static filters')
+ self._ensure_static_filters()
+
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ self._define_filter(self._filter_container(instance_filter_name,
+ [base_filter]))
+
+ def _ensure_static_filters(self):
+ if self.static_filters_configured:
+ return
+
+ self._define_filter(self._filter_container('nova-base',
+ ['no-mac-spoofing',
+ 'no-ip-spoofing',
+ 'no-arp-spoofing',
+ 'allow-dhcp-server']))
+ self._define_filter(self._filter_container('nova-vpn',
+ ['allow-dhcp-server']))
+ self._define_filter(self.nova_base_ipv4_filter)
+ self._define_filter(self.nova_base_ipv6_filter)
+ self._define_filter(self.nova_dhcp_filter)
+ self._define_filter(self.nova_ra_filter)
+ if FLAGS.allow_project_net_traffic:
+ self._define_filter(self.nova_project_filter)
+ if FLAGS.use_ipv6:
+ self._define_filter(self.nova_project_filter_v6)
+
+ self.static_filters_configured = True
+
+ def _filter_container(self, name, filters):
+ xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
+ name,
+ ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
+ return xml
+
+ def nova_base_ipv4_filter(self):
+ retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
+ for protocol in ['tcp', 'udp', 'icmp']:
+ for direction, action, priority in [('out', 'accept', 399),
+ ('in', 'drop', 400)]:
+ retval += """<rule action='%s' direction='%s' priority='%d'>
+ <%s />
+ </rule>""" % (action, direction,
+ priority, protocol)
+ retval += '</filter>'
+ return retval
+
+ def nova_base_ipv6_filter(self):
+ retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
+ for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
+ for direction, action, priority in [('out', 'accept', 399),
+ ('in', 'drop', 400)]:
+ retval += """<rule action='%s' direction='%s' priority='%d'>
+ <%s />
+ </rule>""" % (action, direction,
+ priority, protocol)
+ retval += '</filter>'
+ return retval
+
+ def nova_project_filter(self):
+ retval = "<filter name='nova-project' chain='ipv4'>"
+ for protocol in ['tcp', 'udp', 'icmp']:
+ retval += """<rule action='accept' direction='in' priority='200'>
+ <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' />
+ </rule>""" % protocol
+ retval += '</filter>'
+ return retval
+
+ def nova_project_filter_v6(self):
+ retval = "<filter name='nova-project-v6' chain='ipv6'>"
+ for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
+ retval += """<rule action='accept' direction='inout'
+ priority='200'>
+ <%s srcipaddr='$PROJNETV6'
+ srcipmask='$PROJMASKV6' />
+ </rule>""" % (protocol)
+ retval += '</filter>'
+ return retval
+
+ def _define_filter(self, xml):
+ if callable(xml):
+ xml = xml()
+ # execute in a native thread and block current greenthread until done
+ tpool.execute(self._conn.nwfilterDefineXML, xml)
+
+ def unfilter_instance(self, instance):
+ # Nothing to do
+ pass
+
+ def prepare_instance_filter(self, instance, network_info=None):
+ """
+ Creates an NWFilter for the given instance. In the process,
+ it makes sure the filters for the security groups as well as
+ the base filter are all in place.
+ """
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+
+ ctxt = context.get_admin_context()
+
+ instance_secgroup_filter_name = \
+ '%s-secgroup' % (self._instance_filter_name(instance))
+ #% (instance_filter_name,)
+
+ instance_secgroup_filter_children = ['nova-base-ipv4',
+ 'nova-base-ipv6',
+ 'nova-allow-dhcp-server']
+
+ if FLAGS.use_ipv6:
+ networks = [network for (network, _m) in network_info if
+ network['gateway_v6']]
+
+ if networks:
+ instance_secgroup_filter_children.\
+ append('nova-allow-ra-server')
+
+ for security_group in \
+ db.security_group_get_by_instance(ctxt, instance['id']):
+
+ self.refresh_security_group_rules(security_group['id'])
+
+ instance_secgroup_filter_children.append('nova-secgroup-%s' %
+ security_group['id'])
+
+ self._define_filter(
+ self._filter_container(instance_secgroup_filter_name,
+ instance_secgroup_filter_children))
+
+ network_filters = self.\
+ _create_network_filters(instance, network_info,
+ instance_secgroup_filter_name)
+
+ for (name, children) in network_filters:
+ self._define_filters(name, children)
+
+ def _create_network_filters(self, instance, network_info,
+ instance_secgroup_filter_name):
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
+ result = []
+ for (_n, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ instance_filter_children = [base_filter,
+ instance_secgroup_filter_name]
+
+ if FLAGS.allow_project_net_traffic:
+ instance_filter_children.append('nova-project')
+ if FLAGS.use_ipv6:
+ instance_filter_children.append('nova-project-v6')
+
+ result.append((instance_filter_name, instance_filter_children))
+
+ return result
+
+ def _define_filters(self, filter_name, filter_children):
+ self._define_filter(self._filter_container(filter_name,
+ filter_children))
+
+ def refresh_security_group_rules(self,
+ security_group_id,
+ network_info=None):
+ return self._define_filter(
+ self.security_group_to_nwfilter_xml(security_group_id))
+
+ def security_group_to_nwfilter_xml(self, security_group_id):
+ security_group = db.security_group_get(context.get_admin_context(),
+ security_group_id)
+ rule_xml = ""
+ v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
+ for rule in security_group.rules:
+ rule_xml += "<rule action='accept' direction='in' priority='300'>"
+ if rule.cidr:
+ version = netutils.get_ip_version(rule.cidr)
+ if(FLAGS.use_ipv6 and version == 6):
+ net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (v6protocol[rule.protocol], net, prefixlen)
+ else:
+ net, mask = netutils.get_net_and_mask(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (rule.protocol, net, mask)
+ if rule.protocol in ['tcp', 'udp']:
+ rule_xml += "dstportstart='%s' dstportend='%s' " % \
+ (rule.from_port, rule.to_port)
+ elif rule.protocol == 'icmp':
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
+ if rule.from_port != -1:
+ rule_xml += "type='%s' " % rule.from_port
+ if rule.to_port != -1:
+ rule_xml += "code='%s' " % rule.to_port
+
+ rule_xml += '/>\n'
+ rule_xml += "</rule>\n"
+ xml = "<filter name='nova-secgroup-%s' " % security_group_id
+ if(FLAGS.use_ipv6):
+ xml += "chain='root'>%s</filter>" % rule_xml
+ else:
+ xml += "chain='ipv4'>%s</filter>" % rule_xml
+ return xml
+
+ def _instance_filter_name(self, instance, nic_id=None):
+ if not nic_id:
+ return 'nova-instance-%s' % (instance['name'])
+ return 'nova-instance-%s-%s' % (instance['name'], nic_id)
+
+ def instance_filter_exists(self, instance):
+ """Check nova-instance-instance-xxx exists"""
+ network_info = netutils.get_network_info(instance)
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ try:
+ self._conn.nwfilterLookupByName(instance_filter_name)
+ except libvirt.libvirtError:
+ name = instance.name
+ LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
+ '%(name)s is not found.') % locals())
+ return False
+ return True
+
+
+class IptablesFirewallDriver(FirewallDriver):
+ def __init__(self, execute=None, **kwargs):
+ from nova.network import linux_net
+ self.iptables = linux_net.iptables_manager
+ self.instances = {}
+ self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+
+ self.iptables.ipv4['filter'].add_chain('sg-fallback')
+ self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
+ self.iptables.ipv6['filter'].add_chain('sg-fallback')
+ self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
+
+ def setup_basic_filtering(self, instance, network_info=None):
+ """Use NWFilter from libvirt for this."""
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+ return self.nwfilter.setup_basic_filtering(instance, network_info)
+
+ def apply_instance_filter(self, instance):
+ """No-op. Everything is done in prepare_instance_filter"""
+ pass
+
+ def unfilter_instance(self, instance):
+ if self.instances.pop(instance['id'], None):
+ self.remove_filters_for_instance(instance)
+ self.iptables.apply()
+ else:
+ LOG.info(_('Attempted to unfilter instance %s which is not '
+ 'filtered'), instance['id'])
+
+ def prepare_instance_filter(self, instance, network_info=None):
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+ self.instances[instance['id']] = instance
+ self.add_filters_for_instance(instance, network_info)
+ self.iptables.apply()
+
+ def _create_filter(self, ips, chain_name):
+ return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
+
+ def _filters_for_instance(self, chain_name, network_info):
+ ips_v4 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ips']]
+ ipv4_rules = self._create_filter(ips_v4, chain_name)
+
+ ipv6_rules = []
+ if FLAGS.use_ipv6:
+ ips_v6 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ip6s']]
+ ipv6_rules = self._create_filter(ips_v6, chain_name)
+
+ return ipv4_rules, ipv6_rules
+
+ def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule(chain_name, rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+
+ def add_filters_for_instance(self, instance, network_info=None):
+ chain_name = self._instance_chain_name(instance)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ self.iptables.ipv4['filter'].add_chain(chain_name)
+ ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
+ network_info)
+ self._add_filters('local', ipv4_rules, ipv6_rules)
+ ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ self._add_filters(chain_name, ipv4_rules, ipv6_rules)
+
+ def remove_filters_for_instance(self, instance):
+ chain_name = self._instance_chain_name(instance)
+
+ self.iptables.ipv4['filter'].remove_chain(chain_name)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].remove_chain(chain_name)
+
+ def instance_rules(self, instance, network_info=None):
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+ ctxt = context.get_admin_context()
+
+ ipv4_rules = []
+ ipv6_rules = []
+
+ # Always drop invalid packets
+ ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
+ ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
+
+ # Allow established connections
+ ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+ ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+
+ dhcp_servers = [network['gateway'] for (network, _m) in network_info]
+
+ for dhcp_server in dhcp_servers:
+ ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT' % (dhcp_server,))
+
+ #Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrs = [network['cidr'] for (network, _m) in network_info]
+ for cidr in cidrs:
+ ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
+
+ # We wrap these in FLAGS.use_ipv6 because they might cause
+ # a DB lookup. The other ones are just list operations, so
+ # they're not worth the clutter.
+ if FLAGS.use_ipv6:
+ # Allow RA responses
+ gateways_v6 = [network['gateway_v6'] for (network, _) in
+ network_info]
+ for gateway_v6 in gateways_v6:
+ ipv6_rules.append(
+ '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
+
+ #Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrv6s = [network['cidr_v6'] for (network, _m)
+ in network_info]
+
+ for cidrv6 in cidrv6s:
+ ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
+
+ security_groups = db.security_group_get_by_instance(ctxt,
+ instance['id'])
+
+ # then, security group chains and rules
+ for security_group in security_groups:
+ rules = db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
+
+ for rule in rules:
+ logging.info('%r', rule)
+
+ if not rule.cidr:
+ # Eventually, a mechanism to grant access for security
+ # groups will turn up here. It'll use ipsets.
+ continue
+
+ version = netutils.get_ip_version(rule.cidr)
+ if version == 4:
+ rules = ipv4_rules
+ else:
+ rules = ipv6_rules
+
+ protocol = rule.protocol
+ if version == 6 and rule.protocol == 'icmp':
+ protocol = 'icmpv6'
+
+ args = ['-p', protocol, '-s', rule.cidr]
+
+ if rule.protocol in ['udp', 'tcp']:
+ if rule.from_port == rule.to_port:
+ args += ['--dport', '%s' % (rule.from_port,)]
+ else:
+ args += ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule.from_port,
+ rule.to_port)]
+ elif rule.protocol == 'icmp':
+ icmp_type = rule.from_port
+ icmp_code = rule.to_port
+
+ if icmp_type == -1:
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == -1:
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ if version == 4:
+ args += ['-m', 'icmp', '--icmp-type',
+ icmp_type_arg]
+ elif version == 6:
+ args += ['-m', 'icmp6', '--icmpv6-type',
+ icmp_type_arg]
+
+ args += ['-j ACCEPT']
+ rules += [' '.join(args)]
+
+ ipv4_rules += ['-j $sg-fallback']
+ ipv6_rules += ['-j $sg-fallback']
+
+ return ipv4_rules, ipv6_rules
+
+ def instance_filter_exists(self, instance):
+ """Check nova-instance-instance-xxx exists"""
+ return self.nwfilter.instance_filter_exists(instance)
+
+ def refresh_security_group_members(self, security_group):
+ pass
+
+ def refresh_security_group_rules(self, security_group, network_info=None):
+ self.do_refresh_security_group_rules(security_group, network_info)
+ self.iptables.apply()
+
+ @utils.synchronized('iptables', external=True)
+ def do_refresh_security_group_rules(self,
+ security_group,
+ network_info=None):
+ for instance in self.instances.values():
+ self.remove_filters_for_instance(instance)
+ if not network_info:
+ network_info = netutils.get_network_info(instance)
+ self.add_filters_for_instance(instance, network_info)
+
+ def _security_group_chain_name(self, security_group_id):
+ return 'nova-sg-%s' % (security_group_id,)
+
+ def _instance_chain_name(self, instance):
+ return 'inst-%s' % (instance['id'],)
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py
new file mode 100644
index 000000000..4d596078a
--- /dev/null
+++ b/nova/virt/libvirt/netutils.py
@@ -0,0 +1,97 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""Network-releated utilities for supporting libvirt connection code."""
+
+
+import IPy
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import ipv6
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+
+
+def get_net_and_mask(cidr):
+ net = IPy.IP(cidr)
+ return str(net.net()), str(net.netmask())
+
+
+def get_net_and_prefixlen(cidr):
+ net = IPy.IP(cidr)
+ return str(net.net()), str(net.prefixlen())
+
+
+def get_ip_version(cidr):
+ net = IPy.IP(cidr)
+ return int(net.version())
+
+
+def get_network_info(instance):
+ # TODO(adiantum) If we will keep this function
+ # we should cache network_info
+ admin_context = context.get_admin_context()
+
+ ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
+ instance['id'])
+ networks = db.network_get_all_by_instance(admin_context,
+ instance['id'])
+ flavor = db.instance_type_get_by_id(admin_context,
+ instance['instance_type_id'])
+ network_info = []
+
+ for network in networks:
+ network_ips = [ip for ip in ip_addresses
+ if ip['network_id'] == network['id']]
+
+ def ip_dict(ip):
+ return {
+ 'ip': ip['address'],
+ 'netmask': network['netmask'],
+ 'enabled': '1'}
+
+ def ip6_dict():
+ prefix = network['cidr_v6']
+ mac = instance['mac_address']
+ project_id = instance['project_id']
+ return {
+ 'ip': ipv6.to_global(prefix, mac, project_id),
+ 'netmask': network['netmask_v6'],
+ 'enabled': '1'}
+
+ mapping = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'broadcast': network['broadcast'],
+ 'mac': instance['mac_address'],
+ 'rxtx_cap': flavor['rxtx_cap'],
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_ips]}
+
+ if FLAGS.use_ipv6:
+ mapping['ip6s'] = [ip6_dict()]
+ mapping['gateway6'] = network['gateway_v6']
+
+ network_info.append((network, mapping))
+ return network_info
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index c8f342aa8..9f6cd608c 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -48,6 +48,8 @@ FLAGS = flags.FLAGS
flags.DEFINE_string('default_os_type', 'linux', 'Default OS type')
flags.DEFINE_integer('block_device_creation_timeout', 10,
'time to wait for a block device to be created')
+flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024,
+ 'maximum size in bytes of kernel or ramdisk images')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -444,6 +446,12 @@ class VMHelper(HelperBase):
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
+ elif image_type == ImageType.KERNEL_RAMDISK and \
+ vdi_size > FLAGS.max_kernel_ramdisk_size:
+ max_size = FLAGS.max_kernel_ramdisk_size
+ raise exception.Error(
+ _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
+ "max %(max_size)d bytes") % locals())
name_label = get_name_label_for_image(image)
vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index fe9a74dd6..be6ef48ea 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -25,15 +25,15 @@ import M2Crypto
import os
import pickle
import subprocess
-import tempfile
import uuid
-from nova import db
from nova import context
-from nova import log as logging
+from nova import db
from nova import exception
-from nova import utils
from nova import flags
+from nova import ipv6
+from nova import log as logging
+from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import power_state
@@ -202,6 +202,13 @@ class VMOps(object):
for path, contents in instance.injected_files:
LOG.debug(_("Injecting file path: '%s'") % path)
self.inject_file(instance, path, contents)
+
+ def _set_admin_password():
+ admin_password = instance.admin_pass
+ if admin_password:
+ LOG.debug(_("Setting admin password"))
+ self.set_admin_password(instance, admin_password)
+
# NOTE(armando): Do we really need to do this in virt?
# NOTE(tr3buchet): not sure but wherever we do it, we need to call
# reset_network afterwards
@@ -214,6 +221,7 @@ class VMOps(object):
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
_inject_files()
+ _set_admin_password()
return True
except Exception, exc:
LOG.warn(exc)
@@ -253,7 +261,8 @@ class VMOps(object):
instance_name = instance_or_vm.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance_obj.id)
+ raise exception.NotFound(_("No opaque_ref could be determined "
+ "for '%s'.") % instance_or_vm)
return vm_ref
def _acquire_bootlock(self, vm):
@@ -457,6 +466,9 @@ class VMOps(object):
# Successful return code from password is '0'
if resp_dict['returncode'] != '0':
raise RuntimeError(resp_dict['message'])
+ db.instance_update(context.get_admin_context(),
+ instance['id'],
+ dict(admin_pass=new_pass))
return resp_dict['message']
def inject_file(self, instance, path, contents):
@@ -808,8 +820,9 @@ class VMOps(object):
def ip6_dict():
return {
- "ip": utils.to_global_ipv6(network['cidr_v6'],
- instance['mac_address']),
+ "ip": ipv6.to_global(network['cidr_v6'],
+ instance['mac_address'],
+ instance['project_id']),
"netmask": network['netmask_v6'],
"enabled": "1"}
@@ -1161,23 +1174,22 @@ class SimpleDH(object):
return mpi
def _run_ssl(self, text, which):
- base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc '
- '-a -pass pass:%(shared)s -nosalt %(dec_flag)s')
+ base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s '
+ '-nosalt %(dec_flag)s')
if which.lower()[0] == 'd':
dec_flag = ' -d'
else:
dec_flag = ''
- fd, tmpfile = tempfile.mkstemp()
- os.close(fd)
- file(tmpfile, 'w').write(text)
shared = self._shared
cmd = base_cmd % locals()
proc = _runproc(cmd)
+ proc.stdin.write(text + '\n')
+ proc.stdin.close()
proc.wait()
err = proc.stderr.read()
if err:
raise RuntimeError(_('OpenSSL error: %s') % err)
- return proc.stdout.read()
+ return proc.stdout.read().strip('\n')
def encrypt(self, text):
return self._run_ssl(text, 'enc')
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 72284ac02..7821a4f7e 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -204,14 +204,17 @@ def _get_volume_id(path_or_id):
if isinstance(path_or_id, int):
return path_or_id
# n must contain at least the volume_id
- # /vol- is for remote volumes
- # -vol- is for local volumes
+ # :volume- is for remote volumes
+ # -volume- is for local volumes
# see compute/manager->setup_compute_volume
- volume_id = path_or_id[path_or_id.find('/vol-') + 1:]
+ volume_id = path_or_id[path_or_id.find(':volume-') + 1:]
if volume_id == path_or_id:
- volume_id = path_or_id[path_or_id.find('-vol-') + 1:]
- volume_id = volume_id.replace('--', '-')
- return volume_id
+ volume_id = path_or_id[path_or_id.find('-volume--') + 1:]
+ volume_id = volume_id.replace('volume--', '')
+ else:
+ volume_id = volume_id.replace('volume-', '')
+ volume_id = volume_id[0:volume_id.find('-')]
+ return int(volume_id)
def _get_target_host(iscsi_string):
@@ -244,25 +247,23 @@ def _get_target(volume_id):
Gets iscsi name and portal from volume name and host.
For this method to work the following are needed:
1) volume_ref['host'] must resolve to something rather than loopback
- 2) ietd must bind only to the address as resolved above
- If any of the two conditions are not met, fall back on Flags.
"""
- volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(),
- volume_id)
+ volume_ref = db.volume_get(context.get_admin_context(),
+ volume_id)
result = (None, None)
try:
- (r, _e) = utils.execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" %
- volume_ref['host'])
+ (r, _e) = utils.execute('sudo', 'iscsiadm',
+ '-m', 'discovery',
+ '-t', 'sendtargets',
+ '-p', volume_ref['host'])
except exception.ProcessExecutionError, exc:
LOG.exception(exc)
else:
- targets = r.splitlines()
- if len(_e) == 0 and len(targets) == 1:
- for target in targets:
- if volume_id in target:
- (location, _sep, iscsi_name) = target.partition(" ")
- break
- iscsi_portal = location.split(",")[0]
- result = (iscsi_name, iscsi_portal)
+ volume_name = "volume-%08x" % volume_id
+ for target in r.splitlines():
+ if FLAGS.iscsi_ip_prefix in target and volume_name in target:
+ (location, _sep, iscsi_name) = target.partition(" ")
+ break
+ iscsi_portal = location.split(",")[0]
+ result = (iscsi_name, iscsi_portal)
return result
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 0cabccf08..6d828e109 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -57,6 +57,8 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
- suffix "_rec" for record objects
"""
+import json
+import random
import sys
import urlparse
import xmlrpclib
@@ -67,10 +69,12 @@ from eventlet import timeout
from nova import context
from nova import db
+from nova import exception
from nova import utils
from nova import flags
from nova import log as logging
from nova.virt import driver
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
@@ -165,9 +169,16 @@ class XenAPIConnection(driver.ComputeDriver):
def __init__(self, url, user, pw):
super(XenAPIConnection, self).__init__()
- session = XenAPISession(url, user, pw)
- self._vmops = VMOps(session)
- self._volumeops = VolumeOps(session)
+ self._session = XenAPISession(url, user, pw)
+ self._vmops = VMOps(self._session)
+ self._volumeops = VolumeOps(self._session)
+ self._host_state = None
+
+ @property
+ def HostState(self):
+ if not self._host_state:
+ self._host_state = HostState(self._session)
+ return self._host_state
def init_host(self, host):
#FIXME(armando): implement this
@@ -315,6 +326,16 @@ class XenAPIConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
+ def update_host_status(self):
+ """Update the status info of the host, and return those values
+ to the calling program."""
+ return self.HostState.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first."""
+ return self.HostState.get_host_stats(refresh=refresh)
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
@@ -326,7 +347,6 @@ class XenAPISession(object):
"(is the Dom0 disk full?)"))
with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
self._session.login_with_password(user, pw)
- self.loop = None
def get_imported_xenapi(self):
"""Stubout point. This can be replaced with a mock xenapi module."""
@@ -363,57 +383,52 @@ class XenAPISession(object):
def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
- until it completes. Not re-entrant."""
+ until it completes."""
done = event.Event()
- self.loop = utils.LoopingCall(self._poll_task, id, task, done)
- self.loop.start(FLAGS.xenapi_task_poll_interval, now=True)
- rv = done.wait()
- self.loop.stop()
- return rv
-
- def _stop_loop(self):
- """Stop polling for task to finish."""
- #NOTE(sandy-walsh) Had to break this call out to support unit tests.
- if self.loop:
- self.loop.stop()
+ loop = utils.LoopingCall(f=None)
+
+ def _poll_task():
+ """Poll the given XenAPI task, and return the result if the
+ action was completed successfully or not.
+ """
+ try:
+ name = self._session.xenapi.task.get_name_label(task)
+ status = self._session.xenapi.task.get_status(task)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
+ if status == "pending":
+ return
+ elif status == "success":
+ result = self._session.xenapi.task.get_result(task)
+ LOG.info(_("Task [%(name)s] %(task)s status:"
+ " success %(result)s") % locals())
+ done.send(_parse_xmlrpc_value(result))
+ else:
+ error_info = self._session.xenapi.task.get_error_info(task)
+ action["error"] = str(error_info)
+ LOG.warn(_("Task [%(name)s] %(task)s status:"
+ " %(status)s %(error_info)s") % locals())
+ done.send_exception(self.XenAPI.Failure(error_info))
+
+ if id:
+ db.instance_action_create(context.get_admin_context(),
+ action)
+ except self.XenAPI.Failure, exc:
+ LOG.warn(exc)
+ done.send_exception(*sys.exc_info())
+ loop.stop()
+
+ loop.f = _poll_task
+ loop.start(FLAGS.xenapi_task_poll_interval, now=True)
+ return done.wait()
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
return self.XenAPI.Session(url)
- def _poll_task(self, id, task, done):
- """Poll the given XenAPI task, and fire the given action if we
- get a result.
- """
- try:
- name = self._session.xenapi.task.get_name_label(task)
- status = self._session.xenapi.task.get_status(task)
- if id:
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
- if status == "pending":
- return
- elif status == "success":
- result = self._session.xenapi.task.get_result(task)
- LOG.info(_("Task [%(name)s] %(task)s status:"
- " success %(result)s") % locals())
- done.send(_parse_xmlrpc_value(result))
- else:
- error_info = self._session.xenapi.task.get_error_info(task)
- action["error"] = str(error_info)
- LOG.warn(_("Task [%(name)s] %(task)s status:"
- " %(status)s %(error_info)s") % locals())
- done.send_exception(self.XenAPI.Failure(error_info))
-
- if id:
- db.instance_action_create(context.get_admin_context(), action)
- except self.XenAPI.Failure, exc:
- LOG.warn(exc)
- done.send_exception(*sys.exc_info())
- self._stop_loop()
-
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details"""
try:
@@ -436,6 +451,65 @@ class XenAPISession(object):
raise
+class HostState(object):
+ """Manages information about the XenServer host this compute
+ node is running on.
+ """
+ def __init__(self, session):
+ super(HostState, self).__init__()
+ self._session = session
+ self._stats = {}
+ self.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first.
+ """
+ if refresh:
+ self.update_status()
+ return self._stats
+
+ def update_status(self):
+ """Since under Xenserver, a compute node runs on a given host,
+ we can get host status information using xenapi.
+ """
+ LOG.debug(_("Updating host stats"))
+ # Make it something unlikely to match any actual instance ID
+ task_id = random.randint(-80000, -70000)
+ task = self._session.async_call_plugin("xenhost", "host_data", {})
+ task_result = self._session.wait_for_task(task, task_id)
+ if not task_result:
+ task_result = json.dumps({})
+ try:
+ data = json.loads(task_result)
+ except ValueError as e:
+ # Invalid JSON object
+ LOG.error(_("Unable to get updated status: %s") % e)
+ return
+ # Get the SR usage
+ try:
+ sr_ref = vm_utils.safe_find_sr(self._session)
+ except exception.NotFound as e:
+ # No SR configured
+ LOG.error(_("Unable to get SR for this host: %s") % e)
+ return
+ sr_rec = self._session.get_xenapi().SR.get_record(sr_ref)
+ total = int(sr_rec["virtual_allocation"])
+ used = int(sr_rec["physical_utilisation"])
+ data["disk_total"] = total
+ data["disk_used"] = used
+ data["disk_available"] = total - used
+ host_memory = data.get('host_memory', None)
+ if host_memory:
+ data["host_memory_total"] = host_memory.get('total', 0)
+ data["host_memory_overhead"] = host_memory.get('overhead', 0)
+ data["host_memory_free"] = host_memory.get('free', 0)
+ data["host_memory_free_computed"] = \
+ host_memory.get('free-computed', 0)
+ del data['host_memory']
+ self._stats = data
+
+
def _parse_xmlrpc_value(val):
"""Parse the given value as if it were an XML-RPC value. This is
sometimes used as the format for the task.result field."""
diff --git a/nova/wsgi.py b/nova/wsgi.py
index e60a8820d..ea9bb963d 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -59,13 +59,16 @@ class Server(object):
def __init__(self, threads=1000):
self.pool = eventlet.GreenPool(threads)
+ self.socket_info = {}
- def start(self, application, port, host='0.0.0.0', backlog=128):
+ def start(self, application, port, host='0.0.0.0', key=None, backlog=128):
"""Run a WSGI server with the given application."""
arg0 = sys.argv[0]
logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals())
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)
+ if key:
+ self.socket_info[key] = socket.getsockname()
def wait(self):
"""Wait until all servers have completed running."""
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 0a45f3873..4b45671ae 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -68,12 +68,12 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host,
area.
"""
conn = httplib.HTTPConnection(glance_host, glance_port)
- conn.request('GET', '/images/%s' % image_id)
+ conn.request('GET', '/v1/images/%s' % image_id)
resp = conn.getresponse()
if resp.status == httplib.NOT_FOUND:
raise Exception("Image '%s' not found in Glance" % image_id)
elif resp.status != httplib.OK:
- raise Exception("Unexpected response from Glance %i" % res.status)
+ raise Exception("Unexpected response from Glance %i" % resp.status)
tar_cmd = "tar -zx --directory=%(staging_path)s" % locals()
tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True)
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
new file mode 100644
index 000000000..a8428e841
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for host operations (returns status information
+# about the physical host this compute node runs on)
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import os
+import random
+import re
+import subprocess
+import tempfile
+import time
+
+import XenAPIPlugin
+
+from pluginlib_nova import *
+configure_logging("xenhost")
+
+host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")
+
+
+def jsonify(fnc):
+ def wrapper(*args, **kwargs):
+ return json.dumps(fnc(*args, **kwargs))
+ return wrapper
+
+
+class TimeoutError(StandardError):
+ pass
+
+
+def _run_command(cmd):
+ """Abstracts out the basics of issuing system commands. If the command
+ returns anything in stderr, a PluginError is raised with that information.
+ Otherwise, the output from stdout is returned.
+ """
+ pipe = subprocess.PIPE
+ proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
+ stderr=pipe, close_fds=True)
+ proc.wait()
+ err = proc.stderr.read()
+ if err:
+ raise PluginError(err)
+ return proc.stdout.read()
+
+
+@jsonify
+def host_data(self, arg_dict):
+ """Runs the commands on the xenstore host to return the current status
+ information.
+ """
+ cmd = "xe host-list | grep uuid"
+ resp = _run_command(cmd)
+ host_uuid = resp.split(":")[-1].strip()
+ cmd = "xe host-param-list uuid=%s" % host_uuid
+ resp = _run_command(cmd)
+ parsed_data = parse_response(resp)
+ # We have the raw dict of values. Extract those that we need,
+ # and convert the data types as needed.
+ ret_dict = cleanup(parsed_data)
+ return ret_dict
+
+
+def parse_response(resp):
+ data = {}
+ for ln in resp.splitlines():
+ if not ln:
+ continue
+ mtch = host_data_pattern.match(ln.strip())
+ try:
+ k, v = mtch.groups()
+ data[k] = v
+ except AttributeError:
+ # Not a valid line; skip it
+ continue
+ return data
+
+
+def cleanup(dct):
+ """Take the raw KV pairs returned and translate them into the
+ appropriate types, discarding any we don't need.
+ """
+ def safe_int(val):
+ """Integer values will either be string versions of numbers,
+ or empty strings. Convert the latter to nulls.
+ """
+ try:
+ return int(val)
+ except ValueError:
+ return None
+
+ def strip_kv(ln):
+ return [val.strip() for val in ln.split(":", 1)]
+
+ out = {}
+
+# sbs = dct.get("supported-bootloaders", "")
+# out["host_supported-bootloaders"] = sbs.split("; ")
+# out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "")
+# out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "")
+# out["host_local-cache-sr"] = dct.get("local-cache-sr", "")
+ out["host_memory"] = omm = {}
+ omm["total"] = safe_int(dct.get("memory-total", ""))
+ omm["overhead"] = safe_int(dct.get("memory-overhead", ""))
+ omm["free"] = safe_int(dct.get("memory-free", ""))
+ omm["free-computed"] = safe_int(
+ dct.get("memory-free-computed", ""))
+
+# out["host_API-version"] = avv = {}
+# avv["vendor"] = dct.get("API-version-vendor", "")
+# avv["major"] = safe_int(dct.get("API-version-major", ""))
+# avv["minor"] = safe_int(dct.get("API-version-minor", ""))
+
+ out["host_uuid"] = dct.get("uuid", None)
+ out["host_name-label"] = dct.get("name-label", "")
+ out["host_name-description"] = dct.get("name-description", "")
+# out["host_host-metrics-live"] = dct.get(
+# "host-metrics-live", "false") == "true"
+ out["host_hostname"] = dct.get("hostname", "")
+ out["host_ip_address"] = dct.get("address", "")
+ oc = dct.get("other-config", "")
+ out["host_other-config"] = ocd = {}
+ if oc:
+ for oc_fld in oc.split("; "):
+ ock, ocv = strip_kv(oc_fld)
+ ocd[ock] = ocv
+# out["host_capabilities"] = dct.get("capabilities", "").split("; ")
+# out["host_allowed-operations"] = dct.get(
+# "allowed-operations", "").split("; ")
+# lsrv = dct.get("license-server", "")
+# out["host_license-server"] = ols = {}
+# if lsrv:
+# for lspart in lsrv.split("; "):
+# lsk, lsv = lspart.split(": ")
+# if lsk == "port":
+# ols[lsk] = safe_int(lsv)
+# else:
+# ols[lsk] = lsv
+# sv = dct.get("software-version", "")
+# out["host_software-version"] = osv = {}
+# if sv:
+# for svln in sv.split("; "):
+# svk, svv = strip_kv(svln)
+# osv[svk] = svv
+ cpuinf = dct.get("cpu_info", "")
+ out["host_cpu_info"] = ocp = {}
+ if cpuinf:
+ for cpln in cpuinf.split("; "):
+ cpk, cpv = strip_kv(cpln)
+ if cpk in ("cpu_count", "family", "model", "stepping"):
+ ocp[cpk] = safe_int(cpv)
+ else:
+ ocp[cpk] = cpv
+# out["host_edition"] = dct.get("edition", "")
+# out["host_external-auth-service-name"] = dct.get(
+# "external-auth-service-name", "")
+ return out
+
+
+if __name__ == "__main__":
+ XenAPIPlugin.dispatch(
+ {"host_data": host_data})
diff --git a/run_tests.sh b/run_tests.sh
index e3a0bd243..9aa555484 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -59,7 +59,13 @@ function run_tests {
function run_pep8 {
echo "Running pep8 ..."
+ # Opt-out files from pep8
+ ignore_scripts="*.sh:*nova-debug:*clean-vlans"
+ ignore_files="*eventlet-patch:*pip-requires"
+ ignore_dirs="*ajaxterm*"
+ GLOBIGNORE="$ignore_scripts:$ignore_files:$ignore_dirs"
srcfiles=`find bin -type f ! -name "nova.conf*"`
+ srcfiles+=" `find tools/*`"
srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance"
pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles}
}
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 30ec85374..812b1dd0f 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -1,3 +1,4 @@
+
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -30,114 +31,125 @@ import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.nova-venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
-TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
+TWISTED_NOVA = 'http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
+PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
+
def die(message, *args):
- print >>sys.stderr, message % args
- sys.exit(1)
+ print >>sys.stderr, message % args
+ sys.exit(1)
+
+
+def check_python_version():
+ if sys.version_info < (2, 6):
+ die("Need Python Version >= 2.6")
def run_command(cmd, redirect_output=True, check_exit_code=True):
- """
- Runs a command in an out-of-process shell, returning the
- output of that command. Working directory is ROOT.
- """
- if redirect_output:
- stdout = subprocess.PIPE
- else:
- stdout = None
+ """
+ Runs a command in an out-of-process shell, returning the
+ output of that command. Working directory is ROOT.
+ """
+ if redirect_output:
+ stdout = subprocess.PIPE
+ else:
+ stdout = None
- proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
- output = proc.communicate()[0]
- if check_exit_code and proc.returncode != 0:
- die('Command "%s" failed.\n%s', ' '.join(cmd), output)
- return output
+ proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
+ output = proc.communicate()[0]
+ if check_exit_code and proc.returncode != 0:
+ die('Command "%s" failed.\n%s', ' '.join(cmd), output)
+ return output
-HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip())
-HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip())
+HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'],
+ check_exit_code=False).strip())
+HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'],
+ check_exit_code=False).strip())
def check_dependencies():
- """Make sure virtualenv is in the path."""
-
- if not HAS_VIRTUALENV:
- print 'not found.'
- # Try installing it via easy_install...
- if HAS_EASY_INSTALL:
- print 'Installing virtualenv via easy_install...',
- if not (run_command(['which', 'easy_install']) and
- run_command(['easy_install', 'virtualenv'])):
- die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,'
- ' please install it using your favorite package management tool')
- print 'done.'
- print 'done.'
+ """Make sure virtualenv is in the path."""
+
+ if not HAS_VIRTUALENV:
+ print 'not found.'
+ # Try installing it via easy_install...
+ if HAS_EASY_INSTALL:
+ print 'Installing virtualenv via easy_install...',
+ if not (run_command(['which', 'easy_install']) and
+ run_command(['easy_install', 'virtualenv'])):
+ die('ERROR: virtualenv not found.\n\nNova development'
+ ' requires virtualenv, please install it using your'
+ ' favorite package management tool')
+ print 'done.'
+ print 'done.'
def create_virtualenv(venv=VENV):
- """Creates the virtual environment and installs PIP only into the
- virtual environment
- """
- print 'Creating venv...',
- run_command(['virtualenv', '-q', '--no-site-packages', VENV])
- print 'done.'
- print 'Installing pip in virtualenv...',
- if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
- die("Failed to install pip.")
- print 'done.'
+ """Creates the virtual environment and installs PIP only into the
+ virtual environment
+ """
+ print 'Creating venv...',
+ run_command(['virtualenv', '-q', '--no-site-packages', VENV])
+ print 'done.'
+ print 'Installing pip in virtualenv...',
+ if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip():
+ die("Failed to install pip.")
+ print 'done.'
def install_dependencies(venv=VENV):
- print 'Installing dependencies with pip (this can take a while)...'
- # Install greenlet by hand - just listing it in the requires file does not
- # get it in stalled in the right order
- run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, 'greenlet'],
- redirect_output=False)
- run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES],
- redirect_output=False)
- run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA],
- redirect_output=False)
-
-
- # Tell the virtual env how to "import nova"
- pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth")
- f = open(pthfile, 'w')
- f.write("%s\n" % ROOT)
- # Patch eventlet (see FAQ # 1485)
- patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch')
- patchfile = os.path.join(venv, "lib", "python2.6", "site-packages", "eventlet",
- "green", "subprocess.py")
- patch_cmd = "patch %s %s" % (patchfile, patchsrc)
- os.system(patch_cmd)
+ print 'Installing dependencies with pip (this can take a while)...'
+ # Install greenlet by hand - just listing it in the requires file does not
+ # get it installed in the right order
+ run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv,
+ 'greenlet'], redirect_output=False)
+ run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r',
+ PIP_REQUIRES], redirect_output=False)
+ run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv,
+ TWISTED_NOVA], redirect_output=False)
+
+ # Tell the virtual env how to "import nova"
+ pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
+ "nova.pth")
+ f = open(pthfile, 'w')
+ f.write("%s\n" % ROOT)
+ # Patch eventlet (see FAQ # 1485)
+ patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch')
+ patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
+ "eventlet", "green", "subprocess.py")
+ patch_cmd = "patch %s %s" % (patchfile, patchsrc)
+ os.system(patch_cmd)
def print_help():
- help = """
- Nova development environment setup is complete.
+ help = """
+ Nova development environment setup is complete.
- Nova development uses virtualenv to track and manage Python dependencies
- while in development and testing.
+ Nova development uses virtualenv to track and manage Python dependencies
+ while in development and testing.
- To activate the Nova virtualenv for the extent of your current shell session
- you can run:
+ To activate the Nova virtualenv for the extent of your current shell
+ session you can run:
- $ source .nova-venv/bin/activate
+ $ source .nova-venv/bin/activate
- Or, if you prefer, you can run commands in the virtualenv on a case by case
- basis by running:
+ Or, if you prefer, you can run commands in the virtualenv on a case by case
+ basis by running:
- $ tools/with_venv.sh <your command>
+ $ tools/with_venv.sh <your command>
- Also, make test will automatically use the virtualenv.
- """
- print help
+ Also, make test will automatically use the virtualenv.
+ """
+ print help
def main(argv):
- check_dependencies()
- create_virtualenv()
- install_dependencies()
- print_help()
+ check_python_version()
+ check_dependencies()
+ create_virtualenv()
+ install_dependencies()
+ print_help()
if __name__ == '__main__':
- main(sys.argv)
+ main(sys.argv)
diff --git a/tools/pip-requires b/tools/pip-requires
index e438c2a41..8f8018765 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -2,7 +2,7 @@ SQLAlchemy==0.6.3
pep8==0.5.0
pylint==0.19
IPy==0.70
-Cheetah==2.4.2.1
+Cheetah==2.4.4
M2Crypto==0.20.2
amqplib==0.6.1
anyjson==0.2.4
@@ -33,3 +33,4 @@ nova-adminclient
suds==0.4
coverage
nosexcover
+GitPython