summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTrey Morris <trey.morris@rackspace.com>2011-05-24 16:19:46 -0500
committerTrey Morris <trey.morris@rackspace.com>2011-05-24 16:19:46 -0500
commitd3c6f77f287f8078606ca7fc99a8121cadb76fd4 (patch)
treee37cb63b7448436fefff8d11388d56ee64615177
parent08a22883fd5bf58b5b74645d1b2065a0be8c733b (diff)
parent330b3febe9970a0358cbc145ea88faeb3da121d5 (diff)
moved auto assign floating ip functionality from compute manager to network manager
-rw-r--r--.mailmap3
-rw-r--r--Authors6
-rwxr-xr-xbin/nova-manage75
-rwxr-xr-xbin/stack2
-rw-r--r--doc/source/_theme/layout.html2
-rw-r--r--doc/source/devref/cloudpipe.rst8
-rw-r--r--doc/source/devref/down.sh (renamed from doc/source/down.sh)0
-rw-r--r--doc/source/devref/interfaces17
-rw-r--r--doc/source/devref/up.sh (renamed from doc/source/up.sh)0
-rwxr-xr-xnova/CA/geninter.sh2
-rw-r--r--nova/CA/openssl.cnf.tmpl2
-rw-r--r--nova/api/ec2/__init__.py10
-rw-r--r--nova/api/ec2/admin.py2
-rw-r--r--nova/api/ec2/cloud.py50
-rw-r--r--nova/api/ec2/ec2utils.py2
-rw-r--r--nova/api/openstack/__init__.py9
-rw-r--r--nova/api/openstack/accounts.py2
-rw-r--r--nova/api/openstack/auth.py17
-rw-r--r--nova/api/openstack/common.py11
-rw-r--r--nova/api/openstack/flavors.py3
-rw-r--r--nova/api/openstack/images.py2
-rw-r--r--nova/api/openstack/limits.py31
-rw-r--r--nova/api/openstack/servers.py130
-rw-r--r--nova/api/openstack/users.py2
-rw-r--r--nova/api/openstack/views/images.py17
-rw-r--r--nova/api/openstack/views/limits.py109
-rw-r--r--nova/api/openstack/views/servers.py8
-rw-r--r--nova/auth/dbdriver.py21
-rw-r--r--nova/auth/ldapdriver.py59
-rw-r--r--nova/auth/manager.py102
-rw-r--r--nova/cloudpipe/pipelib.py3
-rw-r--r--nova/compute/api.py63
-rw-r--r--nova/compute/instance_types.py16
-rw-r--r--nova/compute/manager.py188
-rw-r--r--nova/compute/power_state.py21
-rw-r--r--nova/console/vmrc.py6
-rw-r--r--nova/db/api.py40
-rw-r--r--nova/db/sqlalchemy/api.py232
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/001_austin.py8
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py9
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py5
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py8
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py7
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py35
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py203
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py68
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py60
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/019_multi_nic.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/015_multi_nic.py)0
-rw-r--r--nova/db/sqlalchemy/models.py21
-rw-r--r--nova/exception.py526
-rw-r--r--nova/flags.py8
-rw-r--r--nova/image/fake.py10
-rw-r--r--nova/image/glance.py16
-rw-r--r--nova/image/local.py14
-rw-r--r--nova/ipv6/__init__.py17
-rw-r--r--nova/ipv6/account_identifier.py45
-rw-r--r--nova/ipv6/api.py41
-rw-r--r--nova/ipv6/rfc2462.py42
-rw-r--r--nova/network/api.py83
-rw-r--r--nova/network/linux_net.py228
-rw-r--r--nova/network/manager.py248
-rw-r--r--nova/network/vmwareapi_net.py37
-rw-r--r--nova/network/xenapi_net.py18
-rw-r--r--nova/notifier/__init__.py14
-rw-r--r--nova/notifier/api.py83
-rw-r--r--nova/notifier/log_notifier.py34
-rw-r--r--nova/notifier/no_op_notifier.py19
-rw-r--r--nova/notifier/rabbit_notifier.py36
-rw-r--r--nova/quota.py44
-rw-r--r--nova/scheduler/api.py8
-rw-r--r--nova/scheduler/driver.py36
-rw-r--r--nova/scheduler/host_filter.py288
-rw-r--r--nova/scheduler/manager.py7
-rw-r--r--nova/scheduler/zone_manager.py20
-rw-r--r--nova/tests/api/openstack/fakes.py3
-rw-r--r--nova/tests/api/openstack/test_flavors.py4
-rw-r--r--nova/tests/api/openstack/test_images.py24
-rw-r--r--nova/tests/api/openstack/test_limits.py121
-rw-r--r--nova/tests/api/openstack/test_servers.py311
-rw-r--r--nova/tests/api/openstack/test_zones.py2
-rw-r--r--nova/tests/api/test_wsgi.py6
-rw-r--r--nova/tests/db/fakes.py1
-rw-r--r--nova/tests/network/base.py9
-rw-r--r--nova/tests/test_api.py19
-rw-r--r--nova/tests/test_auth.py40
-rw-r--r--nova/tests/test_cloud.py57
-rw-r--r--nova/tests/test_compute.py31
-rw-r--r--nova/tests/test_exception.py34
-rw-r--r--nova/tests/test_host_filter.py208
-rw-r--r--nova/tests/test_instance_types.py15
-rw-r--r--nova/tests/test_ipv6.py60
-rw-r--r--nova/tests/test_misc.py49
-rw-r--r--nova/tests/test_notifier.py117
-rw-r--r--nova/tests/test_quota.py81
-rw-r--r--nova/tests/test_scheduler.py99
-rw-r--r--nova/tests/test_utils.py27
-rw-r--r--nova/tests/test_virt.py232
-rw-r--r--nova/tests/test_volume.py2
-rw-r--r--nova/tests/test_xenapi.py73
-rw-r--r--nova/tests/test_zones.py18
-rw-r--r--nova/tests/xenapi/stubs.py39
-rw-r--r--nova/utils.py63
-rw-r--r--nova/virt/fake.py5
-rw-r--r--nova/virt/hyperv.py21
-rw-r--r--nova/virt/libvirt_conn.py287
-rw-r--r--nova/virt/vmwareapi/fake.py9
-rw-r--r--nova/virt/vmwareapi/vmops.py43
-rw-r--r--nova/virt/xenapi/vm_utils.py9
-rw-r--r--nova/virt/xenapi/vmops.py46
-rw-r--r--nova/virt/xenapi/volumeops.py6
-rw-r--r--nova/virt/xenapi_conn.py172
-rw-r--r--nova/volume/api.py7
-rw-r--r--nova/wsgi.py21
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/agent3
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost183
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py8
-rwxr-xr-xrun_tests.sh1
-rw-r--r--setup.py20
-rw-r--r--tools/install_venv.py10
-rw-r--r--tools/pip-requires5
129 files changed, 4810 insertions, 1470 deletions
diff --git a/.mailmap b/.mailmap
index 7e031fc7c..3f0238ee9 100644
--- a/.mailmap
+++ b/.mailmap
@@ -29,6 +29,7 @@
<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
<matt.dietz@rackspace.com> <mdietz@openstack>
<mordred@inaugust.com> <mordred@hudson>
+<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
<nirmal.ranganathan@rackspace.com> <nirmal.ranganathan@rackspace.coom>
<paul@openstack.org> <paul.voccio@rackspace.com>
<paul@openstack.org> <pvoccio@castor.local>
@@ -36,6 +37,7 @@
<rlane@wikimedia.org> <laner@controller>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<soren.hansen@rackspace.com> <soren@linux2go.dk>
+<throughnothing@gmail.com> <will.wolf@rackspace.com>
<todd@ansolabs.com> <todd@lapex>
<todd@ansolabs.com> <todd@rubidine.com>
<tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
@@ -44,5 +46,4 @@
<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
-<naveedm9@gmail.com> <naveed.massjouni@rackspace.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
diff --git a/Authors b/Authors
index c440d3c11..546c9091f 100644
--- a/Authors
+++ b/Authors
@@ -1,3 +1,4 @@
+Alex Meade <alex.meade@rackspace.com>
Andy Smith <code@term.ie>
Andy Southgate <andy.southgate@citrix.com>
Anne Gentle <anne@openstack.org>
@@ -43,11 +44,14 @@ Josh Kearney <josh@jk0.org>
Josh Kleinpeter <josh@kleinpeter.org>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
+Justin Shepherd <jshepher@rackspace.com>
Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
+Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
+Lvov Maxim <usrleon@gmail.com>
Mark Washenberger <mark.washenberger@rackspace.com>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
@@ -76,6 +80,8 @@ Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
+William Wolf <throughnothing@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
+Yuriy Taraday <yorik.sar@gmail.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
diff --git a/bin/nova-manage b/bin/nova-manage
index 8e2a49ef2..5bd60e45c 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -82,6 +82,7 @@ from nova import log as logging
from nova import quota
from nova import rpc
from nova import utils
+from nova import version
from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.cloudpipe import pipelib
@@ -150,7 +151,7 @@ class VpnCommands(object):
state = 'up'
print address,
print vpn['host'],
- print vpn['ec2_id'],
+ print ec2utils.id_to_ec2_id(vpn['id']),
print vpn['state_description'],
print state
else:
@@ -391,10 +392,10 @@ class ProjectCommands(object):
with open(filename, 'w') as f:
f.write(rc)
- def list(self):
+ def list(self, username=None):
"""Lists all projects
- arguments: <none>"""
- for project in self.manager.get_projects():
+ arguments: [username]"""
+ for project in self.manager.get_projects(username):
print project.name
def quota(self, project_id, key=None, value=None):
@@ -402,11 +403,10 @@ class ProjectCommands(object):
arguments: project_id [key] [value]"""
ctxt = context.get_admin_context()
if key:
- quo = {'project_id': project_id, key: value}
try:
- db.quota_update(ctxt, project_id, quo)
+ db.quota_update(ctxt, project_id, key, value)
except exception.NotFound:
- db.quota_create(ctxt, quo)
+ db.quota_create(ctxt, project_id, key, value)
project_quota = quota.get_quota(ctxt, project_id)
for key, value in project_quota.iteritems():
print '%s: %s' % (key, value)
@@ -532,8 +532,10 @@ class NetworkCommands(object):
[label='public'], [flat_network_bridge=FLAG,
[bridge_interface=FLAG]"""
if not fixed_range:
- raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is '
- 'required to create networks.'))
+ msg = _('Fixed range in the form of 10.0.0.0/8 is '
+ 'required to create networks.')
+ print msg
+ raise TypeError(msg)
if not num_networks:
num_networks = FLAGS.num_networks
if not network_size:
@@ -549,16 +551,21 @@ class NetworkCommands(object):
if not bridge_interface:
bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
net_manager = utils.import_object(FLAGS.network_manager)
- net_manager.create_networks(context.get_admin_context(),
- cidr=fixed_range,
- num_networks=int(num_networks),
- network_size=int(network_size),
- vlan_start=int(vlan_start),
- vpn_start=int(vpn_start),
- cidr_v6=fixed_range_v6,
- label=label,
- bridge=flat_network_bridge,
- bridge_interface=bridge_interface)
+
+ try:
+ net_manager.create_networks(context.get_admin_context(),
+ cidr=fixed_range,
+ num_networks=int(num_networks),
+ network_size=int(network_size),
+ vlan_start=int(vlan_start),
+ vpn_start=int(vpn_start),
+ cidr_v6=fixed_range_v6,
+ label=label,
+ bridge=flat_network_bridge,
+ bridge_interface=bridge_interface)
+ except ValueError, e:
+ print e
+ raise e
def list(self):
"""List all created networks"""
@@ -774,6 +781,17 @@ class DbCommands(object):
print migration.db_version()
+class VersionCommands(object):
+ """Class for exposing the codebase version."""
+
+ def __init__(self):
+ pass
+
+ def list(self):
+ print _("%s (%s)") %\
+ (version.version_string(), version.version_string_with_vcs())
+
+
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state"""
@@ -842,11 +860,17 @@ class InstanceTypeCommands(object):
instance_types.create(name, memory, vcpus, local_gb,
flavorid, swap, rxtx_quota, rxtx_cap)
except exception.InvalidInputException:
- print "Must supply valid parameters to create instance type"
+ print "Must supply valid parameters to create instance_type"
print e
sys.exit(1)
- except exception.DBError, e:
- print "DB Error: %s" % e
+ except exception.ApiError, e:
+ print "\n\n"
+ print "\n%s" % e
+ print "Please ensure instance_type name and flavorid are unique."
+ print "To complete remove a instance_type, use the --purge flag:"
+ print "\n # nova-manage instance_type delete <name> --purge\n"
+ print "Currently defined instance_type names and flavorids:"
+ self.list("--all")
sys.exit(2)
except:
print "Unknown error"
@@ -970,7 +994,7 @@ class ImageCommands(object):
try:
internal_id = ec2utils.ec2_id_to_id(old_image_id)
image = self.image_service.show(context, internal_id)
- except exception.NotFound:
+ except (exception.InvalidEc2Id, exception.ImageNotFound):
image = self.image_service.show_by_name(context, old_image_id)
return image['id']
@@ -1059,7 +1083,8 @@ CATEGORIES = [
('volume', VolumeCommands),
('instance_type', InstanceTypeCommands),
('image', ImageCommands),
- ('flavor', InstanceTypeCommands)]
+ ('flavor', InstanceTypeCommands),
+ ('version', VersionCommands)]
def lazy_match(name, key_value_tuples):
@@ -1101,6 +1126,8 @@ def main():
script_name = argv.pop(0)
if len(argv) < 1:
+ print _("\nOpenStack Nova version: %s (%s)\n") %\
+ (version.version_string(), version.version_string_with_vcs())
print script_name + " category action [<args>]"
print _("Available categories:")
for k, _v in CATEGORIES:
diff --git a/bin/stack b/bin/stack
index d84a82e27..a1c6d1348 100755
--- a/bin/stack
+++ b/bin/stack
@@ -65,7 +65,7 @@ def format_help(d):
indent = MAX_INDENT - 6
out = []
- for k, v in d.iteritems():
+ for k, v in sorted(d.iteritems()):
if (len(k) + 6) > MAX_INDENT:
out.extend([' %s' % k])
initial_indent = ' ' * (indent + 6)
diff --git a/doc/source/_theme/layout.html b/doc/source/_theme/layout.html
index 0a37a7943..b28edb364 100644
--- a/doc/source/_theme/layout.html
+++ b/doc/source/_theme/layout.html
@@ -73,7 +73,7 @@
<script type="text/javascript">$('#searchbox').show(0);</script>
<p class="triangle-border right">
- Psst... hey. You're reading the latest content, but it might be out of sync with code. You can read <a href="http://nova.openstack.org/2011.1">Nova 2011.1 docs</a> or <a href="http://docs.openstack.org">all OpenStack docs</a> too.
+ Psst... hey. You're reading the latest content, but it might be out of sync with code. You can read <a href="http://nova.openstack.org/2011.2">Nova 2011.2 docs</a> or <a href="http://docs.openstack.org">all OpenStack docs</a> too.
</p>
{%- endif %}
diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst
index 95570aa1b..15d3160b7 100644
--- a/doc/source/devref/cloudpipe.rst
+++ b/doc/source/devref/cloudpipe.rst
@@ -62,12 +62,18 @@ Making a cloudpipe image is relatively easy.
:language: bash
:linenos:
-# download and run the payload on boot from /etc/rc.local.
+# download and run the payload on boot from /etc/rc.local
.. literalinclude:: rc.local
:language: bash
:linenos:
+# setup /etc/network/interfaces
+
+.. literalinclude:: interfaces
+ :language: bash
+ :linenos:
+
# register the image and set the image id in your flagfile::
--vpn_image_id=ami-xxxxxxxx
diff --git a/doc/source/down.sh b/doc/source/devref/down.sh
index 5c1888870..5c1888870 100644
--- a/doc/source/down.sh
+++ b/doc/source/devref/down.sh
diff --git a/doc/source/devref/interfaces b/doc/source/devref/interfaces
new file mode 100644
index 000000000..b7116aeb7
--- /dev/null
+++ b/doc/source/devref/interfaces
@@ -0,0 +1,17 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto eth0
+iface eth0 inet manual
+ up ifconfig $IFACE 0.0.0.0 up
+ down ifconfig $IFACE down
+
+auto br0
+iface br0 inet dhcp
+ bridge_ports eth0
+
diff --git a/doc/source/up.sh b/doc/source/devref/up.sh
index 073a58e15..073a58e15 100644
--- a/doc/source/up.sh
+++ b/doc/source/devref/up.sh
diff --git a/nova/CA/geninter.sh b/nova/CA/geninter.sh
index 4b7f5a55c..9b3ea3b76 100755
--- a/nova/CA/geninter.sh
+++ b/nova/CA/geninter.sh
@@ -21,7 +21,7 @@ NAME=$1
SUBJ=$2
mkdir -p projects/$NAME
cd projects/$NAME
-cp ../../openssl.cnf.tmpl openssl.cnf
+cp "$(dirname $0)/openssl.cnf.tmpl" openssl.cnf
sed -i -e s/%USERNAME%/$NAME/g openssl.cnf
mkdir -p certs crl newcerts private
openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
diff --git a/nova/CA/openssl.cnf.tmpl b/nova/CA/openssl.cnf.tmpl
index b80fadf40..f87d9f3b2 100644
--- a/nova/CA/openssl.cnf.tmpl
+++ b/nova/CA/openssl.cnf.tmpl
@@ -46,7 +46,7 @@ policy = policy_match
# RHEL 6 and Fedora 14 (using openssl-1.0.0-4.el6.x86_64 or
# openssl-1.0.0d-1.fc14.x86_64)
[ policy_match ]
-countryName = match
+countryName = supplied
stateOrProvinceName = supplied
organizationName = optional
organizationalUnitName = optional
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index a3c3b25a1..cd59340bd 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -46,8 +46,6 @@ flags.DEFINE_integer('lockout_minutes', 15,
'Number of minutes to lockout if triggered.')
flags.DEFINE_integer('lockout_window', 15,
'Number of minutes for lockout window.')
-flags.DEFINE_list('lockout_memcached_servers', None,
- 'Memcached servers or None for in process cache.')
class RequestLogging(wsgi.Middleware):
@@ -107,11 +105,11 @@ class Lockout(wsgi.Middleware):
def __init__(self, application):
"""middleware can use fake for testing."""
- if FLAGS.lockout_memcached_servers:
+ if FLAGS.memcached_servers:
import memcache
else:
from nova import fakememcache as memcache
- self.mc = memcache.Client(FLAGS.lockout_memcached_servers,
+ self.mc = memcache.Client(FLAGS.memcached_servers,
debug=0)
super(Lockout, self).__init__(application)
@@ -322,9 +320,7 @@ class Executor(wsgi.Application):
except exception.InstanceNotFound as ex:
LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
context=context)
- ec2_id = ec2utils.id_to_ec2_id(ex.instance_id)
- message = _('Instance %s not found') % ec2_id
- return self._error(req, context, type(ex).__name__, message)
+ return self._error(req, context, type(ex).__name__, ex.message)
except exception.VolumeNotFound as ex:
LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
context=context)
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 6a5609d4a..ea94d9c1f 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -266,7 +266,7 @@ class AdminController(object):
def _vpn_for(self, context, project_id):
"""Get the VPN instance for a project ID."""
for instance in db.instance_get_all_by_project(context, project_id):
- if (instance['image_id'] == FLAGS.vpn_image_id
+ if (instance['image_id'] == str(FLAGS.vpn_image_id)
and not instance['state_description'] in
['shutting_down', 'shutdown']):
return instance
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 42ade8dca..de34a2874 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -35,6 +35,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
+from nova import ipv6
from nova import log as logging
from nova import network
from nova import utils
@@ -49,8 +50,6 @@ flags.DECLARE('service_down_time', 'nova.scheduler.driver')
LOG = logging.getLogger("nova.api.cloud")
-InvalidInputException = exception.InvalidInputException
-
def _gen_key(context, user_id, key_name):
"""Generate a key
@@ -61,8 +60,7 @@ def _gen_key(context, user_id, key_name):
# creation before creating key_pair
try:
db.key_pair_get(context, user_id, key_name)
- raise exception.Duplicate(_("The key_pair %s already exists")
- % key_name)
+ raise exception.KeyPairExists(key_name=key_name)
except exception.NotFound:
pass
private_key, public_key, fingerprint = crypto.generate_key_pair()
@@ -159,7 +157,7 @@ class CloudController(object):
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
- image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'ami')
+ image_ec2_id = self.image_ec2_id(instance_ref['image_id'])
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
@@ -188,8 +186,8 @@ class CloudController(object):
for image_type in ['kernel', 'ramdisk']:
if instance_ref.get('%s_id' % image_type):
- ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type],
- self._image_type(image_type))
+ ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type],
+ self._image_type(image_type))
data['meta-data']['%s-id' % image_type] = ec2_id
if False: # TODO(vish): store ancestor ids
@@ -399,11 +397,11 @@ class CloudController(object):
ip_protocol = str(ip_protocol)
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
- raise InvalidInputException(_('%s is not a valid ipProtocol') %
- (ip_protocol,))
+ raise exception.InvalidIpProtocol(protocol=ip_protocol)
if ((min(from_port, to_port) < -1) or
(max(from_port, to_port) > 65535)):
- raise InvalidInputException(_('Invalid port range'))
+ raise exception.InvalidPortRange(from_port=from_port,
+ to_port=to_port)
values['protocol'] = ip_protocol
values['from_port'] = from_port
@@ -703,13 +701,13 @@ class CloudController(object):
instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
if not context.is_admin:
- if instance['image_id'] == FLAGS.vpn_image_id:
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
continue
i = {}
instance_id = instance['id']
ec2_id = ec2utils.id_to_ec2_id(instance_id)
i['instanceId'] = ec2_id
- i['imageId'] = self._image_ec2_id(instance['image_id'])
+ i['imageId'] = self.image_ec2_id(instance['image_id'])
i['instanceState'] = {
'code': instance['state'],
'name': instance['state_description']}
@@ -721,9 +719,10 @@ class CloudController(object):
fixed = instance['fixed_ip']
floating_addr = fixed['floating_ips'][0]['address']
if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
- i['dnsNameV6'] = utils.to_global_ipv6(
+ i['dnsNameV6'] = ipv6.to_global(
instance['fixed_ip']['network']['cidr_v6'],
- instance['fixed_ip']['mac_address']['address'])
+ instance['fixed_ip']['mac_address']['address'],
+ instance['project_id'])
i['privateDnsName'] = fixed_addr
i['privateIpAddress'] = fixed_addr
@@ -900,7 +899,7 @@ class CloudController(object):
return image_type
@staticmethod
- def _image_ec2_id(image_id, image_type='ami'):
+ def image_ec2_id(image_id, image_type='ami'):
"""Returns image ec2_id using id and three letter type."""
template = image_type + '-%08x'
return ec2utils.id_to_ec2_id(int(image_id), template=template)
@@ -909,25 +908,25 @@ class CloudController(object):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
return self.image_service.show(context, internal_id)
- except exception.NotFound:
+ except (exception.InvalidEc2Id, exception.ImageNotFound):
try:
return self.image_service.show_by_name(context, ec2_id)
except exception.NotFound:
- raise exception.NotFound(_('Image %s not found') % ec2_id)
+ raise exception.ImageNotFound(image_id=ec2_id)
def _format_image(self, image):
"""Convert from format defined by BaseImageService to S3 format."""
i = {}
image_type = self._image_type(image.get('container_format'))
- ec2_id = self._image_ec2_id(image.get('id'), image_type)
+ ec2_id = self.image_ec2_id(image.get('id'), image_type)
name = image.get('name')
i['imageId'] = ec2_id
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
- i['kernelId'] = self._image_ec2_id(kernel_id, 'aki')
+ i['kernelId'] = self.image_ec2_id(kernel_id, 'aki')
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
- i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ari')
+ i['ramdiskId'] = self.image_ec2_id(ramdisk_id, 'ari')
i['imageOwnerId'] = image['properties'].get('owner_id')
if name:
i['imageLocation'] = "%s (%s)" % (image['properties'].
@@ -957,8 +956,7 @@ class CloudController(object):
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
- raise exception.NotFound(_('Image %s not found') %
- ec2_id)
+ raise exception.ImageNotFound(image_id=ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
@@ -978,8 +976,8 @@ class CloudController(object):
metadata = {'properties': {'image_location': image_location}}
image = self.image_service.create(context, metadata)
image_type = self._image_type(image.get('container_format'))
- image_id = self._image_ec2_id(image['id'],
- image_type)
+ image_id = self.image_ec2_id(image['id'],
+ image_type)
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
@@ -992,7 +990,7 @@ class CloudController(object):
try:
image = self._get_image(context, image_id)
except exception.NotFound:
- raise exception.NotFound(_('Image %s not found') % image_id)
+ raise exception.ImageNotFound(image_id=image_id)
result = {'imageId': image_id, 'launchPermission': []}
if image['is_public']:
result['launchPermission'].append({'group': 'all'})
@@ -1015,7 +1013,7 @@ class CloudController(object):
try:
image = self._get_image(context, image_id)
except exception.NotFound:
- raise exception.NotFound(_('Image %s not found') % image_id)
+ raise exception.ImageNotFound(image_id=image_id)
internal_id = image['id']
del(image['id'])
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 3b34f6ea5..163aa4ed2 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -24,7 +24,7 @@ def ec2_id_to_id(ec2_id):
try:
return int(ec2_id.split('-')[-1], 16)
except ValueError:
- raise exception.NotFound(_("Id %s Not Found") % ec2_id)
+ raise exception.InvalidEc2Id(ec2_id=ec2_id)
def id_to_ec2_id(instance_id, template='i-%08x'):
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 5e76a06f7..348b70d5b 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -112,9 +112,6 @@ class APIRouter(wsgi.Router):
parent_resource=dict(member_name='server',
collection_name='servers'))
- _limits = limits.LimitsController()
- mapper.resource("limit", "limits", controller=_limits)
-
super(APIRouter, self).__init__(mapper)
@@ -145,6 +142,9 @@ class APIRouterV10(APIRouter):
parent_resource=dict(member_name='server',
collection_name='servers'))
+ mapper.resource("limit", "limits",
+ controller=limits.LimitsControllerV10())
+
mapper.resource("ip", "ips", controller=ips.Controller(),
collection=dict(public='GET', private='GET'),
parent_resource=dict(member_name='server',
@@ -178,3 +178,6 @@ class APIRouterV11(APIRouter):
mapper.resource("flavor", "flavors",
controller=flavors.ControllerV11(),
collection={'detail': 'GET'})
+
+ mapper.resource("limit", "limits",
+ controller=limits.LimitsControllerV11())
diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py
index 6e3763e47..00fdd4540 100644
--- a/nova/api/openstack/accounts.py
+++ b/nova/api/openstack/accounts.py
@@ -48,7 +48,7 @@ class Controller(common.OpenstackController):
"""We cannot depend on the db layer to check for admin access
for the auth manager, so we do it here"""
if not context.is_admin:
- raise exception.NotAuthorized(_("Not admin user."))
+ raise exception.AdminRequired()
def index(self, req):
raise faults.Fault(webob.exc.HTTPNotImplemented())
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 311e6bde9..6c6ee22a2 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -17,7 +17,6 @@
import datetime
import hashlib
-import json
import time
import webob.exc
@@ -25,11 +24,9 @@ import webob.dec
from nova import auth
from nova import context
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
-from nova import manager
from nova import utils
from nova import wsgi
from nova.api.openstack import faults
@@ -102,11 +99,11 @@ class AuthMiddleware(wsgi.Middleware):
token, user = self._authorize_user(username, key, req)
if user and token:
res = webob.Response()
- res.headers['X-Auth-Token'] = token.token_hash
+ res.headers['X-Auth-Token'] = token['token_hash']
res.headers['X-Server-Management-Url'] = \
- token.server_management_url
- res.headers['X-Storage-Url'] = token.storage_url
- res.headers['X-CDN-Management-Url'] = token.cdn_management_url
+ token['server_management_url']
+ res.headers['X-Storage-Url'] = token['storage_url']
+ res.headers['X-CDN-Management-Url'] = token['cdn_management_url']
res.content_type = 'text/plain'
res.status = '204'
LOG.debug(_("Successfully authenticated '%s'") % username)
@@ -130,11 +127,11 @@ class AuthMiddleware(wsgi.Middleware):
except exception.NotFound:
return None
if token:
- delta = datetime.datetime.now() - token.created_at
+ delta = datetime.datetime.utcnow() - token['created_at']
if delta.days >= 2:
- self.db.auth_token_destroy(ctxt, token.token_hash)
+ self.db.auth_token_destroy(ctxt, token['token_hash'])
else:
- return self.auth.get_user(token.user_id)
+ return self.auth.get_user(token['user_id'])
return None
def _authorize_user(self, username, key, req):
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 0b6dc944a..32cd689ca 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import re
from urlparse import urlparse
import webob
@@ -124,16 +125,22 @@ def get_image_id_from_image_hash(image_service, context, image_hash):
"should have numerical format") % image_id
LOG.error(msg)
raise Exception(msg)
- raise exception.NotFound(image_hash)
+ raise exception.ImageNotFound(image_id=image_hash)
def get_id_from_href(href):
"""Return the id portion of a url as an int.
- Given: http://www.foo.com/bar/123?q=4
+ Given: 'http://www.foo.com/bar/123?q=4'
+ Returns: 123
+
+ In order to support local hrefs, the href argument can be just an id:
+ Given: '123'
Returns: 123
"""
+ if re.match(r'\d+$', str(href)):
+ return int(href)
try:
return int(urlparse(href).path.split('/')[-1])
except:
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index 40787bd17..4c5971cf6 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -45,6 +45,9 @@ class Controller(common.OpenstackController):
items = self._get_flavors(req, is_detail=True)
return dict(flavors=items)
+ def _get_view_builder(self, req):
+ raise NotImplementedError()
+
def _get_flavors(self, req, is_detail=True):
"""Helper function that returns a list of flavor dicts."""
ctxt = req.environ['nova.context']
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 77baf5947..34d4c27fc 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -127,7 +127,7 @@ class Controller(common.OpenstackController):
raise webob.exc.HTTPBadRequest()
image = self._compute_service.snapshot(context, server_id, image_name)
- return self.get_builder(req).build(image, detail=True)
+ return dict(image=self.get_builder(req).build(image, detail=True))
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
index 9877af191..47bc238f1 100644
--- a/nova/api/openstack/limits.py
+++ b/nova/api/openstack/limits.py
@@ -33,7 +33,7 @@ from webob.dec import wsgify
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
-from nova.wsgi import Middleware
+from nova.api.openstack.views import limits as limits_views
# Convenience constants for the limits dictionary passed to Limiter().
@@ -51,8 +51,8 @@ class LimitsController(common.OpenstackController):
_serialization_metadata = {
"application/xml": {
"attributes": {
- "limit": ["verb", "URI", "regex", "value", "unit",
- "resetTime", "remaining", "name"],
+ "limit": ["verb", "URI", "uri", "regex", "value", "unit",
+ "resetTime", "next-available", "remaining", "name"],
},
"plurals": {
"rate": "limit",
@@ -67,12 +67,21 @@ class LimitsController(common.OpenstackController):
abs_limits = {}
rate_limits = req.environ.get("nova.limits", [])
- return {
- "limits": {
- "rate": rate_limits,
- "absolute": abs_limits,
- },
- }
+ builder = self._get_view_builder(req)
+ return builder.build(rate_limits, abs_limits)
+
+ def _get_view_builder(self, req):
+ raise NotImplementedError()
+
+
+class LimitsControllerV10(LimitsController):
+ def _get_view_builder(self, req):
+ return limits_views.ViewBuilderV10()
+
+
+class LimitsControllerV11(LimitsController):
+ def _get_view_builder(self, req):
+ return limits_views.ViewBuilderV11()
class Limit(object):
@@ -186,7 +195,7 @@ DEFAULT_LIMITS = [
]
-class RateLimitingMiddleware(Middleware):
+class RateLimitingMiddleware(wsgi.Middleware):
"""
Rate-limits requests passing through this middleware. All limit information
is stored in memory for this implementation.
@@ -200,7 +209,7 @@ class RateLimitingMiddleware(Middleware):
@param application: WSGI application to wrap
@param limits: List of dictionaries describing limits
"""
- Middleware.__init__(self, application)
+ wsgi.Middleware.__init__(self, application)
self._limiter = Limiter(limits or DEFAULT_LIMITS)
@wsgify(RequestClass=wsgi.Request)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 415c0995f..8f2de2afe 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -14,28 +14,25 @@
# under the License.
import base64
-import hashlib
import traceback
from webob import exc
from xml.dom import minidom
from nova import compute
-from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
from nova import utils
-from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
import nova.api.openstack.views.addresses
import nova.api.openstack.views.flavors
+import nova.api.openstack.views.images
import nova.api.openstack.views.servers
from nova.auth import manager as auth_manager
from nova.compute import instance_types
-from nova.compute import power_state
import nova.api.openstack
from nova.scheduler import api as scheduler_api
@@ -78,6 +75,21 @@ class Controller(common.OpenstackController):
""" Returns a list of server details for a given user """
return self._items(req, is_detail=True)
+ def _image_id_from_req_data(self, data):
+ raise NotImplementedError()
+
+ def _flavor_id_from_req_data(self, data):
+ raise NotImplementedError()
+
+ def _get_view_builder(self, req):
+ raise NotImplementedError()
+
+ def _limit_items(self, items, req):
+ raise NotImplementedError()
+
+ def _action_rebuild(self, info, request, instance_id):
+ raise NotImplementedError()
+
def _items(self, req, is_detail):
"""Returns a list of servers for a given user.
@@ -320,10 +332,6 @@ class Controller(common.OpenstackController):
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
- def _action_rebuild(self, input_dict, req, id):
- LOG.debug(_("Rebuild server action is not implemented"))
- return faults.Fault(exc.HTTPNotImplemented())
-
def _action_resize(self, input_dict, req, id):
""" Resizes a given instance to the flavor size requested """
try:
@@ -564,9 +572,8 @@ class Controller(common.OpenstackController):
"""
image_id = image_meta['id']
if image_meta['status'] != 'active':
- raise exception.Invalid(
- _("Cannot build from image %(image_id)s, status not active") %
- locals())
+ raise exception.ImageUnacceptable(image_id=image_id,
+ reason=_("status is not active"))
if image_meta.get('container_format') != 'ami':
return None, None
@@ -574,14 +581,12 @@ class Controller(common.OpenstackController):
try:
kernel_id = image_meta['properties']['kernel_id']
except KeyError:
- raise exception.NotFound(
- _("Kernel not found for image %(image_id)s") % locals())
+ raise exception.KernelNotFoundForImage(image_id=image_id)
try:
ramdisk_id = image_meta['properties']['ramdisk_id']
except KeyError:
- raise exception.NotFound(
- _("Ramdisk not found for image %(image_id)s") % locals())
+ raise exception.RamdiskNotFoundForImage(image_id=image_id)
return kernel_id, ramdisk_id
@@ -598,19 +603,35 @@ class ControllerV10(Controller):
return nova.api.openstack.views.servers.ViewBuilderV10(
addresses_builder)
- def _get_addresses_view_builder(self, req):
- return nova.api.openstack.views.addresses.ViewBuilderV10(req)
-
def _limit_items(self, items, req):
return common.limited(items, req)
def _parse_update(self, context, server_id, inst_dict, update_dict):
if 'adminPass' in inst_dict['server']:
update_dict['admin_pass'] = inst_dict['server']['adminPass']
- try:
- self.compute_api.set_admin_password(context, server_id)
- except exception.TimeoutException:
- return exc.HTTPRequestTimeout()
+ self.compute_api.set_admin_password(context, server_id)
+
+ def _action_rebuild(self, info, request, instance_id):
+ context = request.environ['nova.context']
+ instance_id = int(instance_id)
+
+ try:
+ image_id = info["rebuild"]["imageId"]
+ except (KeyError, TypeError):
+ msg = _("Could not parse imageId from request.")
+ LOG.debug(msg)
+ return faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ try:
+ self.compute_api.rebuild(context, instance_id, image_id)
+ except exception.BuildInProgress:
+ msg = _("Instance %d is currently being rebuilt.") % instance_id
+ LOG.debug(msg)
+ return faults.Fault(exc.HTTPConflict(explanation=msg))
+
+ response = exc.HTTPAccepted()
+ response.empty_body = True
+ return response
class ControllerV11(Controller):
@@ -632,9 +653,6 @@ class ControllerV11(Controller):
return nova.api.openstack.views.servers.ViewBuilderV11(
addresses_builder, flavor_builder, image_builder, base_url)
- def _get_addresses_view_builder(self, req):
- return nova.api.openstack.views.addresses.ViewBuilderV11(req)
-
def _action_change_password(self, input_dict, req, id):
context = req.environ['nova.context']
if (not 'changePassword' in input_dict
@@ -651,6 +669,63 @@ class ControllerV11(Controller):
def _limit_items(self, items, req):
return common.limited_by_marker(items, req)
+ def _validate_metadata(self, metadata):
+ """Ensure that we can work with the metadata given."""
+ try:
+ metadata.iteritems()
+ except AttributeError as ex:
+ msg = _("Unable to parse metadata key/value pairs.")
+ LOG.debug(msg)
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ def _decode_personalities(self, personalities):
+ """Decode the Base64-encoded personalities."""
+ for personality in personalities:
+ try:
+ path = personality["path"]
+ contents = personality["contents"]
+ except (KeyError, TypeError):
+ msg = _("Unable to parse personality path/contents.")
+ LOG.info(msg)
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ try:
+ personality["contents"] = base64.b64decode(contents)
+ except TypeError:
+ msg = _("Personality content could not be Base64 decoded.")
+ LOG.info(msg)
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ def _action_rebuild(self, info, request, instance_id):
+ context = request.environ['nova.context']
+ instance_id = int(instance_id)
+
+ try:
+ image_ref = info["rebuild"]["imageRef"]
+ except (KeyError, TypeError):
+ msg = _("Could not parse imageRef from request.")
+ LOG.debug(msg)
+ return faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ image_id = common.get_id_from_href(image_ref)
+ personalities = info["rebuild"].get("personality", [])
+ metadata = info["rebuild"].get("metadata", {})
+
+ self._validate_metadata(metadata)
+ self._decode_personalities(personalities)
+
+ try:
+ self.compute_api.rebuild(context, instance_id, image_id, metadata,
+ personalities)
+ except exception.BuildInProgress:
+ msg = _("Instance %d is currently being rebuilt.") % instance_id
+ LOG.debug(msg)
+ return faults.Fault(exc.HTTPConflict(explanation=msg))
+
+ response = exc.HTTPAccepted()
+ response.empty_body = True
+ return response
+
def _get_server_admin_password(self, server):
""" Determine the admin password for a server on creation """
password = server.get('adminPass')
@@ -683,8 +758,9 @@ class ServerCreateRequestXMLDeserializer(object):
"""Marshal the server attribute of a parsed request"""
server = {}
server_node = self._find_first_child_named(node, 'server')
- for attr in ["name", "imageId", "flavorId"]:
- server[attr] = server_node.getAttribute(attr)
+ for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]:
+ if server_node.getAttribute(attr):
+ server[attr] = server_node.getAttribute(attr)
metadata = self._extract_metadata(server_node)
if metadata is not None:
server["metadata"] = metadata
diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py
index 077ccfc79..7ae4c3232 100644
--- a/nova/api/openstack/users.py
+++ b/nova/api/openstack/users.py
@@ -48,7 +48,7 @@ class Controller(common.OpenstackController):
"""We cannot depend on the db layer to check for admin access
for the auth manager, so we do it here"""
if not context.is_admin:
- raise exception.NotAuthorized(_("Not admin user"))
+ raise exception.AdminRequired()
def index(self, req):
"""Return all users in brief"""
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index 9dec8a355..2773c9c13 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -46,6 +46,14 @@ class ViewBuilder(object):
except KeyError:
image['status'] = image['status'].upper()
+ def _build_server(self, image, instance_id):
+ """Indicates that you must use a ViewBuilder subclass."""
+ raise NotImplementedError
+
+ def generate_server_ref(self, server_id):
+ """Return an href string pointing to this server."""
+ return os.path.join(self._url, "servers", str(server_id))
+
def generate_href(self, image_id):
"""Return an href string pointing to this object."""
return os.path.join(self._url, "images", str(image_id))
@@ -66,7 +74,7 @@ class ViewBuilder(object):
if "instance_id" in properties:
try:
- image["serverId"] = int(properties["instance_id"])
+ self._build_server(image, int(properties["instance_id"]))
except ValueError:
pass
@@ -85,12 +93,17 @@ class ViewBuilder(object):
class ViewBuilderV10(ViewBuilder):
"""OpenStack API v1.0 Image Builder"""
- pass
+
+ def _build_server(self, image, instance_id):
+ image["serverId"] = instance_id
class ViewBuilderV11(ViewBuilder):
"""OpenStack API v1.1 Image Builder"""
+ def _build_server(self, image, instance_id):
+ image["serverRef"] = self.generate_server_ref(instance_id)
+
def build(self, image_obj, detail=False):
"""Return a standardized image structure for display by the API."""
image = ViewBuilder.build(self, image_obj, detail)
diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py
new file mode 100644
index 000000000..22d1c260d
--- /dev/null
+++ b/nova/api/openstack/views/limits.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from nova.api.openstack import common
+
+
+class ViewBuilder(object):
+ """Openstack API base limits view builder."""
+
+ def _build_rate_limits(self, rate_limits):
+ raise NotImplementedError()
+
+ def _build_rate_limit(self, rate_limit):
+ raise NotImplementedError()
+
+ def _build_absolute_limits(self, absolute_limit):
+ raise NotImplementedError()
+
+ def build(self, rate_limits, absolute_limits):
+ rate_limits = self._build_rate_limits(rate_limits)
+ absolute_limits = self._build_absolute_limits(absolute_limits)
+
+ output = {
+ "limits": {
+ "rate": rate_limits,
+ "absolute": absolute_limits,
+ },
+ }
+
+ return output
+
+
+class ViewBuilderV10(ViewBuilder):
+ """Openstack API v1.0 limits view builder."""
+
+ def _build_rate_limits(self, rate_limits):
+ return [self._build_rate_limit(r) for r in rate_limits]
+
+ def _build_rate_limit(self, rate_limit):
+ return {
+ "verb": rate_limit["verb"],
+ "URI": rate_limit["URI"],
+ "regex": rate_limit["regex"],
+ "value": rate_limit["value"],
+ "remaining": int(rate_limit["remaining"]),
+ "unit": rate_limit["unit"],
+ "resetTime": rate_limit["resetTime"],
+ }
+
+ def _build_absolute_limits(self, absolute_limit):
+ return {}
+
+
+class ViewBuilderV11(ViewBuilder):
+ """Openstack API v1.1 limits view builder."""
+
+ def _build_rate_limits(self, rate_limits):
+ limits = []
+ for rate_limit in rate_limits:
+ _rate_limit_key = None
+ _rate_limit = self._build_rate_limit(rate_limit)
+
+ # check for existing key
+ for limit in limits:
+ if limit["uri"] == rate_limit["URI"] and \
+ limit["regex"] == limit["regex"]:
+ _rate_limit_key = limit
+ break
+
+ # ensure we have a key if we didn't find one
+ if not _rate_limit_key:
+ _rate_limit_key = {
+ "uri": rate_limit["URI"],
+ "regex": rate_limit["regex"],
+ "limit": [],
+ }
+ limits.append(_rate_limit_key)
+
+ _rate_limit_key["limit"].append(_rate_limit)
+
+ return limits
+
+ def _build_rate_limit(self, rate_limit):
+ return {
+ "verb": rate_limit["verb"],
+ "value": rate_limit["value"],
+ "remaining": int(rate_limit["remaining"]),
+ "unit": rate_limit["unit"],
+ "next-available": rate_limit["resetTime"],
+ }
+
+ def _build_absolute_limits(self, absolute_limit):
+ return {}
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index e52bfaea3..0be468edc 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -63,10 +63,12 @@ class ViewBuilder(object):
power_state.BLOCKED: 'ACTIVE',
power_state.SUSPENDED: 'SUSPENDED',
power_state.PAUSED: 'PAUSED',
- power_state.SHUTDOWN: 'ACTIVE',
- power_state.SHUTOFF: 'ACTIVE',
+ power_state.SHUTDOWN: 'SHUTDOWN',
+ power_state.SHUTOFF: 'SHUTOFF',
power_state.CRASHED: 'ERROR',
- power_state.FAILED: 'ERROR'}
+ power_state.FAILED: 'ERROR',
+ power_state.BUILDING: 'BUILD',
+ }
inst_dict = {
'id': int(inst['id']),
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index b2c580d83..a429b7812 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -81,7 +81,7 @@ class DbDriver(object):
user_ref = db.user_create(context.get_admin_context(), values)
return self._db_user_to_auth_user(user_ref)
except exception.Duplicate, e:
- raise exception.Duplicate(_('User %s already exists') % name)
+ raise exception.UserExists(user=name)
def _db_user_to_auth_user(self, user_ref):
return {'id': user_ref['id'],
@@ -103,9 +103,7 @@ class DbDriver(object):
"""Create a project"""
manager = db.user_get(context.get_admin_context(), manager_uid)
if not manager:
- raise exception.NotFound(_("Project can't be created because "
- "manager %s doesn't exist")
- % manager_uid)
+ raise exception.UserNotFound(user_id=manager_uid)
# description is a required attribute
if description is None:
@@ -119,9 +117,7 @@ class DbDriver(object):
for member_uid in member_uids:
member = db.user_get(context.get_admin_context(), member_uid)
if not member:
- raise exception.NotFound(_("Project can't be created "
- "because user %s doesn't exist")
- % member_uid)
+ raise exception.UserNotFound(user_id=member_uid)
members.add(member)
values = {'id': name,
@@ -132,8 +128,7 @@ class DbDriver(object):
try:
project = db.project_create(context.get_admin_context(), values)
except exception.Duplicate:
- raise exception.Duplicate(_("Project can't be created because "
- "project %s already exists") % name)
+ raise exception.ProjectExists(project=name)
for member in members:
db.project_add_member(context.get_admin_context(),
@@ -154,9 +149,7 @@ class DbDriver(object):
if manager_uid:
manager = db.user_get(context.get_admin_context(), manager_uid)
if not manager:
- raise exception.NotFound(_("Project can't be modified because "
- "manager %s doesn't exist") %
- manager_uid)
+ raise exception.UserNotFound(user_id=manager_uid)
values['project_manager'] = manager['id']
if description:
values['description'] = description
@@ -244,8 +237,8 @@ class DbDriver(object):
def _validate_user_and_project(self, user_id, project_id):
user = db.user_get(context.get_admin_context(), user_id)
if not user:
- raise exception.NotFound(_('User "%s" not found') % user_id)
+ raise exception.UserNotFound(user_id=user_id)
project = db.project_get(context.get_admin_context(), project_id)
if not project:
- raise exception.NotFound(_('Project "%s" not found') % project_id)
+ raise exception.ProjectNotFound(project_id=project_id)
return user, project
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index fcac55510..3f8432851 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -171,7 +171,7 @@ class LdapDriver(object):
def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user"""
if self.__user_exists(name):
- raise exception.Duplicate(_("LDAP user %s already exists") % name)
+ raise exception.LDAPUserExists(user=name)
if FLAGS.ldap_user_modify_only:
if self.__ldap_user_exists(name):
# Retrieve user by name
@@ -202,8 +202,7 @@ class LdapDriver(object):
self.conn.modify_s(self.__uid_to_dn(name), attr)
return self.get_user(name)
else:
- raise exception.NotFound(_("LDAP object for %s doesn't exist")
- % name)
+ raise exception.LDAPUserNotFound(user_id=name)
else:
attr = [
('objectclass', ['person',
@@ -226,12 +225,9 @@ class LdapDriver(object):
description=None, member_uids=None):
"""Create a project"""
if self.__project_exists(name):
- raise exception.Duplicate(_("Project can't be created because "
- "project %s already exists") % name)
+ raise exception.ProjectExists(project=name)
if not self.__user_exists(manager_uid):
- raise exception.NotFound(_("Project can't be created because "
- "manager %s doesn't exist")
- % manager_uid)
+ raise exception.LDAPUserNotFound(user_id=manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
@@ -240,9 +236,7 @@ class LdapDriver(object):
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
- raise exception.NotFound(_("Project can't be created "
- "because user %s doesn't exist")
- % member_uid)
+ raise exception.LDAPUserNotFound(user_id=member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
@@ -265,9 +259,7 @@ class LdapDriver(object):
attr = []
if manager_uid:
if not self.__user_exists(manager_uid):
- raise exception.NotFound(_("Project can't be modified because "
- "manager %s doesn't exist")
- % manager_uid)
+ raise exception.LDAPUserNotFound(user_id=manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
manager_dn))
@@ -347,7 +339,7 @@ class LdapDriver(object):
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s doesn't exist") % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
self.__remove_from_all(uid)
if FLAGS.ldap_user_modify_only:
# Delete attributes
@@ -471,15 +463,12 @@ class LdapDriver(object):
description, member_uids=None):
"""Create a group"""
if self.__group_exists(group_dn):
- raise exception.Duplicate(_("Group can't be created because "
- "group %s already exists") % name)
+ raise exception.LDAPGroupExists(group=name)
members = []
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
- raise exception.NotFound(_("Group can't be created "
- "because user %s doesn't exist")
- % member_uid)
+ raise exception.LDAPUserNotFound(user_id=member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@@ -494,8 +483,7 @@ class LdapDriver(object):
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be searched in group "
- "because the user doesn't exist") % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
@@ -506,29 +494,23 @@ class LdapDriver(object):
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be added to the group "
- "because the user doesn't exist") % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
if not self.__group_exists(group_dn):
- raise exception.NotFound(_("The group at dn %s doesn't exist") %
- group_dn)
+ raise exception.LDAPGroupNotFound(group_id=group_dn)
if self.__is_in_group(uid, group_dn):
- raise exception.Duplicate(_("User %(uid)s is already a member of "
- "the group %(group_dn)s") % locals())
+ raise exception.LDAPMembershipExists(uid=uid, group_dn=group_dn)
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
- raise exception.NotFound(_("The group at dn %s doesn't exist")
- % group_dn)
+ raise exception.LDAPGroupNotFound(group_id=group_dn)
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be removed from the "
- "group because the user doesn't exist")
- % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
if not self.__is_in_group(uid, group_dn):
- raise exception.NotFound(_("User %s is not a member of the group")
- % uid)
+ raise exception.LDAPGroupMembershipNotFound(user_id=uid,
+ group_id=group_dn)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(group_dn, uid)
for sub_dn in sub_dns:
@@ -548,9 +530,7 @@ class LdapDriver(object):
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be removed from all "
- "because the user doesn't exist")
- % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@@ -563,8 +543,7 @@ class LdapDriver(object):
def __delete_group(self, group_dn):
"""Delete Group"""
if not self.__group_exists(group_dn):
- raise exception.NotFound(_("Group at dn %s doesn't exist")
- % group_dn)
+ raise exception.LDAPGroupNotFound(group_id=group_dn)
self.conn.delete_s(group_dn)
def __delete_roles(self, project_dn):
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 157f4007f..da640a533 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -223,6 +223,13 @@ class AuthManager(object):
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
+ if FLAGS.memcached_servers:
+ import memcache
+ else:
+ from nova import fakememcache as memcache
+ self.mc = memcache.Client(FLAGS.memcached_servers,
+ debug=0)
+
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
check_type='ec2', headers=None):
@@ -270,8 +277,7 @@ class AuthManager(object):
LOG.debug('user: %r', user)
if user is None:
LOG.audit(_("Failed authorization for access key %s"), access_key)
- raise exception.NotFound(_('No user found for access key %s')
- % access_key)
+ raise exception.AccessKeyNotFound(access_key=access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
@@ -285,8 +291,7 @@ class AuthManager(object):
uname = user.name
LOG.audit(_("failed authorization: no project named %(pjid)s"
" (user=%(uname)s)") % locals())
- raise exception.NotFound(_('No project called %s could be found')
- % project_id)
+ raise exception.ProjectNotFound(project_id=project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
uname = user.name
@@ -295,28 +300,40 @@ class AuthManager(object):
pjid = project.id
LOG.audit(_("Failed authorization: user %(uname)s not admin"
" and not member of project %(pjname)s") % locals())
- raise exception.NotFound(_('User %(uid)s is not a member of'
- ' project %(pjid)s') % locals())
+ raise exception.ProjectMembershipNotFound(project_id=pjid,
+ user_id=uid)
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
- LOG.debug('user.secret: %s', user.secret)
- LOG.debug('expected_signature: %s', expected_signature)
- LOG.debug('signature: %s', signature)
+ LOG.debug(_('user.secret: %s'), user.secret)
+ LOG.debug(_('expected_signature: %s'), expected_signature)
+ LOG.debug(_('signature: %s'), signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
- raise exception.NotAuthorized(_('Signature does not match'))
+ raise exception.InvalidSignature(signature=signature,
+ user=user)
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
- LOG.debug('user.secret: %s', user.secret)
- LOG.debug('expected_signature: %s', expected_signature)
- LOG.debug('signature: %s', signature)
+ LOG.debug(_('user.secret: %s'), user.secret)
+ LOG.debug(_('expected_signature: %s'), expected_signature)
+ LOG.debug(_('signature: %s'), signature)
if signature != expected_signature:
+ (addr_str, port_str) = utils.parse_server_string(server_string)
+ # If the given server_string contains port num, try without it.
+ if port_str != '':
+ host_only_signature = signer.Signer(
+ user.secret.encode()).generate(params, verb,
+ addr_str, path)
+ LOG.debug(_('host_only_signature: %s'),
+ host_only_signature)
+ if signature == host_only_signature:
+ return (user, project)
LOG.audit(_("Invalid signature for user %s"), user.name)
- raise exception.NotAuthorized(_('Signature does not match'))
+ raise exception.InvalidSignature(signature=signature,
+ user=user)
return (user, project)
def get_access_key(self, user, project):
@@ -360,6 +377,27 @@ class AuthManager(object):
if self.has_role(user, role):
return True
+ def _build_mc_key(self, user, role, project=None):
+ key_parts = ['rolecache', User.safe_id(user), str(role)]
+ if project:
+ key_parts.append(Project.safe_id(project))
+ return '-'.join(key_parts)
+
+ def _clear_mc_key(self, user, role, project=None):
+ # NOTE(anthony): it would be better to delete the key
+ self.mc.set(self._build_mc_key(user, role, project), None)
+
+ def _has_role(self, user, role, project=None):
+ mc_key = self._build_mc_key(user, role, project)
+ rslt = self.mc.get(mc_key)
+ if rslt is None:
+ with self.driver() as drv:
+ rslt = drv.has_role(user, role, project)
+ self.mc.set(mc_key, rslt)
+ return rslt
+ else:
+ return rslt
+
def has_role(self, user, role, project=None):
"""Checks existence of role for user
@@ -383,24 +421,24 @@ class AuthManager(object):
@rtype: bool
@return: True if the user has the role.
"""
- with self.driver() as drv:
- if role == 'projectmanager':
- if not project:
- raise exception.Error(_("Must specify project"))
- return self.is_project_manager(user, project)
+ if role == 'projectmanager':
+ if not project:
+ raise exception.Error(_("Must specify project"))
+ return self.is_project_manager(user, project)
+
+ global_role = self._has_role(User.safe_id(user),
+ role,
+ None)
- global_role = drv.has_role(User.safe_id(user),
- role,
- None)
- if not global_role:
- return global_role
+ if not global_role:
+ return global_role
- if not project or role in FLAGS.global_roles:
- return global_role
+ if not project or role in FLAGS.global_roles:
+ return global_role
- return drv.has_role(User.safe_id(user),
- role,
- Project.safe_id(project))
+ return self._has_role(User.safe_id(user),
+ role,
+ Project.safe_id(project))
def add_role(self, user, role, project=None):
"""Adds role for user
@@ -420,9 +458,9 @@ class AuthManager(object):
@param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
- raise exception.NotFound(_("The %s role can not be found") % role)
+ raise exception.UserRoleNotFound(role_id=role)
if project is not None and role in FLAGS.global_roles:
- raise exception.NotFound(_("The %s role is global only") % role)
+ raise exception.GlobalRoleNotAllowed(role_id=role)
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
@@ -432,6 +470,7 @@ class AuthManager(object):
LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
% locals())
with self.driver() as drv:
+ self._clear_mc_key(uid, role, pid)
drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
@@ -460,6 +499,7 @@ class AuthManager(object):
LOG.audit(_("Removing sitewide role %(role)s"
" from user %(uid)s") % locals())
with self.driver() as drv:
+ self._clear_mc_key(uid, role, pid)
drv.remove_role(uid, role, pid)
@staticmethod
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index dc6f55af2..7844d31e1 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -101,12 +101,13 @@ class CloudPipe(object):
key_name = self.setup_key_pair(ctxt)
group_name = self.setup_security_group(ctxt)
+ ec2_id = self.controller.image_ec2_id(FLAGS.vpn_image_id)
reservation = self.controller.run_instances(ctxt,
user_data=self.get_encoded_zip(project_id),
max_count=1,
min_count=1,
instance_type='m1.tiny',
- image_id=FLAGS.vpn_image_id,
+ image_id=ec2_id,
key_name=key_name,
security_group=[group_name])
diff --git a/nova/compute/api.py b/nova/compute/api.py
index c7ee0a119..2dc083e9e 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -19,6 +19,7 @@
"""Handles all requests relating to instances (guest vms)."""
import datetime
+import eventlet
import re
import time
@@ -32,6 +33,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
+from nova.compute import power_state
from nova.scheduler import api as scheduler_api
from nova.db import base
@@ -41,6 +43,8 @@ LOG = logging.getLogger('nova.compute.api')
FLAGS = flags.FLAGS
flags.DECLARE('vncproxy_topic', 'nova.vnc')
+flags.DEFINE_integer('find_host_timeout', 30,
+ 'Timeout after NN seconds when looking for a host.')
def generate_default_hostname(instance_id):
@@ -463,6 +467,26 @@ class API(base.Base):
"""Generic handler for RPC calls to the scheduler."""
rpc.cast(context, FLAGS.scheduler_topic, args)
+ def _find_host(self, context, instance_id):
+ """Find the host associated with an instance."""
+ for attempts in xrange(FLAGS.find_host_timeout):
+ instance = self.get(context, instance_id)
+ host = instance["host"]
+ if host:
+ return host
+ time.sleep(1)
+ raise exception.Error(_("Unable to find host for Instance %s")
+ % instance_id)
+
+ def _set_admin_password(self, context, instance_id, password):
+ """Set the root/admin password for the given instance."""
+ host = self._find_host(context, instance_id)
+
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "set_admin_password",
+ "args": {"instance_id": instance_id, "new_pass": password}})
+
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance.
@@ -483,14 +507,41 @@ class API(base.Base):
"""Reboot the given instance."""
self._cast_compute_message('reboot_instance', context, instance_id)
+ def rebuild(self, context, instance_id, image_id, metadata=None,
+ files_to_inject=None):
+ """Rebuild the given instance with the provided metadata."""
+ instance = db.api.instance_get(context, instance_id)
+
+ if instance["state"] == power_state.BUILDING:
+ msg = _("Instance already building")
+ raise exception.BuildInProgress(msg)
+
+ metadata = metadata or {}
+ self._check_metadata_properties_quota(context, metadata)
+
+ files_to_inject = files_to_inject or []
+ self._check_injected_file_quota(context, files_to_inject)
+
+ self.db.instance_update(context, instance_id, {"metadata": metadata})
+
+ rebuild_params = {
+ "image_id": image_id,
+ "injected_files": files_to_inject,
+ }
+
+ self._cast_compute_message('rebuild_instance',
+ context,
+ instance_id,
+ params=rebuild_params)
+
def revert_resize(self, context, instance_id):
"""Reverts a resize, deleting the 'new' instance in the process."""
context = context.elevated()
migration_ref = self.db.migration_get_by_instance_and_status(context,
instance_id, 'finished')
if not migration_ref:
- raise exception.NotFound(_("No finished migrations found for "
- "instance"))
+ raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
+ status='finished')
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('revert_resize', context, instance_id,
@@ -504,8 +555,8 @@ class API(base.Base):
migration_ref = self.db.migration_get_by_instance_and_status(context,
instance_id, 'finished')
if not migration_ref:
- raise exception.NotFound(_("No finished migrations found for "
- "instance"))
+ raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
+ status='finished')
instance_ref = self.db.instance_get(context, instance_id)
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('confirm_resize', context, instance_id,
@@ -589,8 +640,8 @@ class API(base.Base):
def set_admin_password(self, context, instance_id, password=None):
"""Set the root/admin password for the given instance."""
- self._cast_compute_message(
- 'set_admin_password', context, instance_id, password)
+        eventlet.spawn_n(self._set_admin_password, context, instance_id,
+                         password)
def inject_file(self, context, instance_id):
"""Write a file to the given instance."""
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 98b4425c8..1275a6fdd 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -37,11 +37,11 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0,
try:
int(option)
except ValueError:
- raise exception.InvalidInputException(
- _("create arguments must be positive integers"))
+ raise exception.InvalidInput(reason=_("create arguments must "
+ "be positive integers"))
if (int(memory) <= 0) or (int(vcpus) <= 0) or (int(local_gb) < 0):
- raise exception.InvalidInputException(
- _("create arguments must be positive integers"))
+ raise exception.InvalidInput(reason=_("create arguments must "
+ "be positive integers"))
try:
db.instance_type_create(
@@ -56,13 +56,15 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0,
rxtx_cap=rxtx_cap))
except exception.DBError, e:
LOG.exception(_('DB error: %s') % e)
- raise exception.ApiError(_("Cannot create instance type: %s") % name)
+ raise exception.ApiError(_("Cannot create instance_type with "
+ "name %(name)s and flavorid %(flavorid)s")
+ % locals())
def destroy(name):
"""Marks instance types as deleted."""
if name is None:
- raise exception.InvalidInputException(_("No instance type specified"))
+ raise exception.InvalidInstanceType(instance_type=name)
else:
try:
db.instance_type_destroy(context.get_admin_context(), name)
@@ -74,7 +76,7 @@ def destroy(name):
def purge(name):
"""Removes instance types from database."""
if name is None:
- raise exception.InvalidInputException(_("No instance type specified"))
+ raise exception.InvalidInstanceType(instance_type=name)
else:
try:
db.instance_type_purge(context.get_admin_context(), name)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 03aee0204..c8893d5ba 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -37,11 +37,10 @@ terminating it.
import datetime
import os
-import random
-import string
import socket
import sys
import tempfile
+import time
import functools
from eventlet import greenthread
@@ -53,6 +52,7 @@ from nova import manager
from nova import network
from nova import rpc
from nova import utils
+from nova import volume
from nova.compute import power_state
from nova.virt import driver
@@ -75,7 +75,10 @@ flags.DEFINE_integer('live_migration_retry_count', 30,
flags.DEFINE_integer("rescue_timeout", 0,
"Automatically unrescue an instance after N seconds."
" Set to 0 to disable.")
-
+flags.DEFINE_bool('auto_assign_floating_ip', False,
+ 'Autoassigning floating ip to VM')
+flags.DEFINE_integer('host_state_interval', 120,
+ 'Interval in seconds for querying the host status')
LOG = logging.getLogger('nova.compute.manager')
@@ -129,6 +132,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api = network.API()
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
+ self.network_api = network.API()
+ self._last_host_check = 0
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
@@ -136,17 +141,33 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
- def _update_state(self, context, instance_id):
+ def _update_state(self, context, instance_id, state=None):
"""Update the state of an instance from the driver info."""
- # FIXME(ja): include other fields from state?
instance_ref = self.db.instance_get(context, instance_id)
- try:
- info = self.driver.get_info(instance_ref['name'])
- state = info['state']
- except exception.NotFound:
- state = power_state.FAILED
+
+ if state is None:
+ try:
+ info = self.driver.get_info(instance_ref['name'])
+ except exception.NotFound:
+ info = None
+
+ if info is not None:
+ state = info['state']
+ else:
+ state = power_state.FAILED
+
self.db.instance_set_state(context, instance_id, state)
+ def _update_launched_at(self, context, instance_id, launched_at=None):
+ """Update the launched_at parameter of the given instance."""
+ data = {'launched_at': launched_at or datetime.datetime.utcnow()}
+ self.db.instance_update(context, instance_id, data)
+
+ def _update_image_id(self, context, instance_id, image_id):
+ """Update the image_id for the given instance."""
+ data = {'image_id': image_id}
+ self.db.instance_update(context, instance_id, data)
+
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
@@ -200,7 +221,11 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state.NOSTATE,
'networking')
- is_vpn = instance['image_id'] == FLAGS.vpn_image_id
+ is_vpn = instance['image_id'] == str(FLAGS.vpn_image_id)
+ # NOTE(vish): This could be a cast because we don't do anything
+ # with the address currently, but I'm leaving it as
+ # a call to ensure that network setup completes. We
+ # will eventually also need to save the address here.
if not FLAGS.stub_network:
network_info = self.network_api.allocate_for_instance(context,
instance,
@@ -215,25 +240,17 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = []
# TODO(vish) check to make sure the availability zone matches
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'spawning')
+ self._update_state(context, instance_id, power_state.BUILDING)
try:
self.driver.spawn(instance, network_info)
- now = datetime.datetime.utcnow()
- self.db.instance_update(context,
- instance_id,
- {'launched_at': now})
- except Exception: # pylint: disable=W0702
- LOG.exception(_("Instance '%s' failed to spawn. Is virtualization"
- " enabled in the BIOS?"), instance_id,
- context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.SHUTDOWN)
+ except Exception as ex: # pylint: disable=W0702
+ msg = _("Instance '%(instance_id)s' failed to spawn. Is "
+ "virtualization enabled in the BIOS? Details: "
+ "%(ex)s") % locals()
+ LOG.exception(msg)
+ self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -261,6 +278,33 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
+ def rebuild_instance(self, context, instance_id, image_id):
+ """Destroy and re-make this instance.
+
+ A 'rebuild' effectively purges all existing data from the system and
+ remakes the VM with given 'metadata' and 'personalities'.
+
+ :param context: `nova.RequestContext` object
+ :param instance_id: Instance identifier (integer)
+ :param image_id: Image identifier (integer)
+ """
+ context = context.elevated()
+
+ instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Rebuilding instance %s"), instance_id, context=context)
+
+ self._update_state(context, instance_id, power_state.BUILDING)
+
+ self.driver.destroy(instance_ref)
+ instance_ref.image_id = image_id
+ self.driver.spawn(instance_ref)
+
+ self._update_image_id(context, instance_id, image_id)
+ self._update_launched_at(context, instance_id)
+ self._update_state(context, instance_id)
+
+ @exception.wrap_exception
+ @checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this host."""
context = context.elevated()
@@ -311,21 +355,36 @@ class ComputeManager(manager.SchedulerDependentManager):
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this host."""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
- instance_id = instance_ref['id']
- instance_state = instance_ref['state']
- expected_state = power_state.RUNNING
- if instance_state != expected_state:
- LOG.warn(_('trying to reset the password on a non-running '
- 'instance: %(instance_id)s (state: %(instance_state)s '
- 'expected: %(expected_state)s)') % locals())
- LOG.audit(_('instance %s: setting admin password'),
- instance_ref['name'])
+
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password(FLAGS.password_length)
- self.driver.set_admin_password(instance_ref, new_pass)
- self._update_state(context, instance_id)
+
+ while True:
+ instance_ref = self.db.instance_get(context, instance_id)
+ instance_id = instance_ref["id"]
+ instance_state = instance_ref["state"]
+ expected_state = power_state.RUNNING
+
+ if instance_state != expected_state:
+ time.sleep(5)
+ continue
+ else:
+ try:
+ self.driver.set_admin_password(instance_ref, new_pass)
+ LOG.audit(_("Instance %s: Root password set"),
+ instance_ref["name"])
+ break
+ except NotImplementedError:
+ # NOTE(dprince): if the driver doesn't implement
+ # set_admin_password we break to avoid a loop
+ LOG.warn(_('set_admin_password is not implemented '
+ 'by this driver.'))
+ break
+ except Exception, e:
+ # Catch all here because this could be anything.
+ LOG.exception(e)
+ continue
@exception.wrap_exception
@checks_instance_lock
@@ -517,7 +576,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_type = self.db.instance_type_get_by_flavor_id(context,
migration_ref['new_flavor_id'])
self.db.instance_update(context, instance_id,
- dict(instance_type=instance_type['name'],
+ dict(instance_type_id=instance_type['id'],
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
local_gb=instance_type['local_gb']))
@@ -660,7 +719,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
- return self.driver.get_console_output(instance_ref)
+ output = self.driver.get_console_output(instance_ref)
+ return output.decode('utf-8', 'replace').encode('ascii', 'replace')
@exception.wrap_exception
def get_ajax_console(self, context, instance_id):
@@ -727,6 +787,14 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.volume_detached(context, volume_id)
return True
+ def remove_volume(self, context, volume_id):
+ """Remove volume on compute host.
+
+ :param context: security context
+ :param volume_id: volume ID
+ """
+ self.volume_manager.remove_compute_volume(context, volume_id)
+
@exception.wrap_exception
def compare_cpu(self, context, cpu_info):
"""Checks that the host cpu is compatible with a cpu given by xml.
@@ -768,7 +836,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
tmp_file = os.path.join(FLAGS.instances_path, filename)
if not os.path.exists(tmp_file):
- raise exception.NotFound(_('%s not found') % tmp_file)
+ raise exception.FileNotFound(file_path=tmp_file)
@exception.wrap_exception
def cleanup_shared_storage_test_file(self, context, filename):
@@ -808,8 +876,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# Getting fixed ips
fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id)
if not fixed_ips:
- msg = _("%(instance_id)s(%(ec2_id)s) does not have fixed_ip.")
- raise exception.NotFound(msg % locals())
+ raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
# If any volume is mounted, prepare here.
if not instance_ref['volumes']:
@@ -946,7 +1013,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."))
- def recover_live_migration(self, ctxt, instance_ref, host=None):
+ def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None):
"""Recovers Instance/volume state from migrating -> running.
:param ctxt: security context
@@ -964,8 +1031,13 @@ class ComputeManager(manager.SchedulerDependentManager):
'state': power_state.RUNNING,
'host': host})
- for volume in instance_ref['volumes']:
- self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'})
+ if dest:
+ volume_api = volume.API()
+ for volume_ref in instance_ref['volumes']:
+ volume_id = volume_ref['id']
+ self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
+ if dest:
+ volume_api.remove_from_compute(ctxt, volume_id, dest)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
@@ -982,6 +1054,13 @@ class ComputeManager(manager.SchedulerDependentManager):
error_list.append(ex)
try:
+ self._report_driver_status()
+ except Exception as ex:
+ LOG.warning(_("Error during report_driver_status(): %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ try:
self._poll_instance_states(context)
except Exception as ex:
LOG.warning(_("Error during instance poll: %s"),
@@ -990,6 +1069,16 @@ class ComputeManager(manager.SchedulerDependentManager):
return error_list
+ def _report_driver_status(self):
+ curr_time = time.time()
+ if curr_time - self._last_host_check > FLAGS.host_state_interval:
+ self._last_host_check = curr_time
+ LOG.info(_("Updating host status"))
+ # This will grab info about the host and queue it
+ # to be sent to the Schedulers.
+ self.update_service_capabilities(
+ self.driver.get_host_stats(refresh=True))
+
def _poll_instance_states(self, context):
vm_instances = self.driver.list_instances_detail()
vm_instances = dict((vm.name, vm) for vm in vm_instances)
@@ -1007,8 +1096,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if vm_instance is None:
# NOTE(justinsb): We have to be very careful here, because a
# concurrent operation could be in progress (e.g. a spawn)
- if db_state == power_state.NOSTATE:
- # Assume that NOSTATE => spawning
+ if db_state == power_state.BUILDING:
# TODO(justinsb): This does mean that if we crash during a
# spawn, the machine will never leave the spawning state,
# but this is just the way nova is; this function isn't
@@ -1039,9 +1127,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if vm_state != db_state:
LOG.info(_("DB/VM state mismatch. Changing state from "
"'%(db_state)s' to '%(vm_state)s'") % locals())
- self.db.instance_set_state(context,
- db_instance['id'],
- vm_state)
+ self._update_state(context, db_instance['id'], vm_state)
# NOTE(justinsb): We no longer auto-remove SHUTOFF instances
# It's quite hard to get them back when we do.
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index ef013b2ef..c468fe6b3 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -30,20 +30,23 @@ SHUTOFF = 0x05
CRASHED = 0x06
SUSPENDED = 0x07
FAILED = 0x08
+BUILDING = 0x09
# TODO(justinsb): Power state really needs to be a proper class,
# so that we're not locked into the libvirt status codes and can put mapping
# logic here rather than spread throughout the code
_STATE_MAP = {
- NOSTATE: 'pending',
- RUNNING: 'running',
- BLOCKED: 'blocked',
- PAUSED: 'paused',
- SHUTDOWN: 'shutdown',
- SHUTOFF: 'shutdown',
- CRASHED: 'crashed',
- SUSPENDED: 'suspended',
- FAILED: 'failed to spawn'}
+ NOSTATE: 'pending',
+ RUNNING: 'running',
+ BLOCKED: 'blocked',
+ PAUSED: 'paused',
+ SHUTDOWN: 'shutdown',
+ SHUTOFF: 'shutdown',
+ CRASHED: 'crashed',
+ SUSPENDED: 'suspended',
+ FAILED: 'failed to spawn',
+ BUILDING: 'building',
+}
def name(code):
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
index 127d31121..cc8b0cdf5 100644
--- a/nova/console/vmrc.py
+++ b/nova/console/vmrc.py
@@ -90,8 +90,7 @@ class VMRCConsole(object):
vm_ds_path_name = ds_path_name
break
if vm_ref is None:
- raise exception.NotFound(_('instance - %s not present') %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
json_data = json.dumps({'vm_id': vm_ds_path_name,
'username': username,
'password': password})
@@ -125,8 +124,7 @@ class VMRCSessionConsole(VMRCConsole):
if vm.propSet[0].val == instance_name:
vm_ref = vm.obj
if vm_ref is None:
- raise exception.NotFound(_('instance - %s not present') %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
virtual_machine_ticket = \
vim_session._call_method(
vim_session._get_vim(),
diff --git a/nova/db/api.py b/nova/db/api.py
index 6cabfc234..b49ba4860 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -292,8 +292,13 @@ def floating_ip_update(context, address, values):
return IMPL.floating_ip_update(context, address, values)
+def floating_ip_set_auto_assigned(context, address):
+ """Set auto_assigned flag to floating ip"""
+ return IMPL.floating_ip_set_auto_assigned(context, address)
+
####################
+
def migration_update(context, id, values):
"""Update a migration instance."""
return IMPL.migration_update(context, id, values)
@@ -511,11 +516,6 @@ def instance_get_project_vpn(context, project_id):
return IMPL.instance_get_project_vpn(context, project_id)
-def instance_is_vpn(context, instance_id):
- """True if instance is a vpn."""
- return IMPL.instance_is_vpn(context, instance_id)
-
-
def instance_set_state(context, instance_id, state, description=None):
"""Set the state of an instance."""
return IMPL.instance_set_state(context, instance_id, state, description)
@@ -799,24 +799,34 @@ def auth_token_create(context, token):
###################
-def quota_create(context, values):
- """Create a quota from the values dictionary."""
- return IMPL.quota_create(context, values)
+def quota_create(context, project_id, resource, limit):
+ """Create a quota for the given project and resource."""
+ return IMPL.quota_create(context, project_id, resource, limit)
-def quota_get(context, project_id):
+def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
- return IMPL.quota_get(context, project_id)
+ return IMPL.quota_get(context, project_id, resource)
-def quota_update(context, project_id, values):
- """Update a quota from the values dictionary."""
- return IMPL.quota_update(context, project_id, values)
+def quota_get_all_by_project(context, project_id):
+ """Retrieve all quotas associated with a given project."""
+ return IMPL.quota_get_all_by_project(context, project_id)
-def quota_destroy(context, project_id):
+def quota_update(context, project_id, resource, limit):
+ """Update a quota or raise if it does not exist."""
+ return IMPL.quota_update(context, project_id, resource, limit)
+
+
+def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
- return IMPL.quota_destroy(context, project_id)
+ return IMPL.quota_destroy(context, project_id, resource)
+
+
+def quota_destroy_all_by_project(context, project_id):
+ """Destroy all quotas associated with a given project."""
+    return IMPL.quota_destroy_all_by_project(context, project_id)
###################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 814d0b37f..5bda38085 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -25,6 +25,7 @@ import warnings
from nova import db
from nova import exception
from nova import flags
+from nova import ipv6
from nova import utils
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
@@ -94,7 +95,7 @@ def require_admin_context(f):
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
- raise exception.NotAuthorized()
+ raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
@@ -105,7 +106,7 @@ def require_context(f):
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
- raise exception.NotAuthorized()
+ raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
@@ -137,7 +138,7 @@ def service_get(context, service_id, session=None):
first()
if not result:
- raise exception.NotFound(_('No service for id %s') % service_id)
+ raise exception.ServiceNotFound(service_id=service_id)
return result
@@ -196,8 +197,7 @@ def service_get_all_compute_by_host(context, host):
all()
if not result:
- raise exception.NotFound(_("%s does not exist or is not "
- "a compute node.") % host)
+ raise exception.ComputeHostNotFound(host=host)
return result
@@ -284,8 +284,7 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('No service for %(host)s, %(binary)s')
- % locals())
+ raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@@ -323,7 +322,7 @@ def compute_node_get(context, compute_id, session=None):
first()
if not result:
- raise exception.NotFound(_('No computeNode for id %s') % compute_id)
+ raise exception.ComputeHostNotFound(host=compute_id)
return result
@@ -359,7 +358,7 @@ def certificate_get(context, certificate_id, session=None):
first()
if not result:
- raise exception.NotFound('No certificate for id %s' % certificate_id)
+ raise exception.CertificateNotFound(certificate_id=certificate_id)
return result
@@ -459,8 +458,10 @@ def floating_ip_create(context, values):
def floating_ip_count_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
+ # TODO(tr3buchet): why leave auto_assigned floating IPs out?
return session.query(models.FloatingIp).\
filter_by(project_id=project_id).\
+ filter_by(auto_assigned=False).\
filter_by(deleted=False).\
count()
@@ -489,6 +490,7 @@ def floating_ip_deallocate(context, address):
address,
session=session)
floating_ip_ref['project_id'] = None
+ floating_ip_ref['auto_assigned'] = False
floating_ip_ref.save(session=session)
@@ -522,6 +524,17 @@ def floating_ip_disassociate(context, address):
return fixed_ip_address
+@require_context
+def floating_ip_set_auto_assigned(context, address):
+ session = get_session()
+ with session.begin():
+ floating_ip_ref = floating_ip_get_by_address(context,
+ address,
+ session=session)
+ floating_ip_ref.auto_assigned = True
+ floating_ip_ref.save(session=session)
+
+
@require_admin_context
def floating_ip_get_all(context):
session = get_session()
@@ -545,9 +558,11 @@ def floating_ip_get_all_by_host(context, host):
def floating_ip_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
+ # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return session.query(models.FloatingIp).\
options(joinedload_all('fixed_ip.instance')).\
filter_by(project_id=project_id).\
+ filter_by(auto_assigned=False).\
filter_by(deleted=False).\
all()
@@ -559,12 +574,12 @@ def floating_ip_get_by_address(context, address, session=None):
session = get_session()
result = session.query(models.FloatingIp).\
- options(joinedload_all('fixed_ip.network')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(address=address).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound('No floating ip for address %s' % address)
+ raise exception.FloatingIpNotFound(address=address)
return result
@@ -674,7 +689,7 @@ def fixed_ip_get_all(context, session=None):
session = get_session()
result = session.query(models.FixedIp).all()
if not result:
- raise exception.NotFound(_('No fixed ips defined'))
+ raise exception.NoFixedIpsDefined()
return result
@@ -690,7 +705,7 @@ def fixed_ip_get_all_by_host(context, host=None):
all()
if not result:
- raise exception.NotFound(_('No fixed ips for this host defined'))
+ raise exception.NoFixedIpsDefinedForHost(host=host)
return result
@@ -706,7 +721,7 @@ def fixed_ip_get_by_address(context, address, session=None):
options(joinedload('instance')).\
first()
if not result:
- raise exception.NotFound(_('No fixed ip for address %s') % address)
+ raise exception.FixedIpNotFound(address=address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@@ -728,7 +743,7 @@ def fixed_ip_get_all_by_instance(context, instance_id):
filter_by(deleted=False).\
all()
if not rv:
- raise exception.NotFound(_('No address for instance %s') % instance_id)
+ raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
return rv
@@ -740,8 +755,7 @@ def fixed_ip_get_all_by_mac_address(context, mac_address_id):
filter_by(deleted=False).\
all()
if not rv:
- raise exception.NotFound(_('No fixed_ips for mac_address_id %s') %
- instance_id)
+        raise exception.NoFixedIpFoundForMacAddress(mac_id=mac_address_id)
return rv
@@ -750,7 +764,7 @@ def fixed_ip_get_instance_v6(context, address):
session = get_session()
# convert IPv6 address to mac
- mac = utils.to_mac(address)
+ mac = ipv6.to_mac(address)
# get mac address row
mac_ref = mac_address_get_by_address(context, mac)
@@ -971,17 +985,17 @@ def instance_destroy(context, instance_id):
with session.begin():
session.query(models.Instance).\
filter_by(id=instance_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -1017,9 +1031,7 @@ def instance_get(context, instance_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.InstanceNotFound(_('Instance %s not found')
- % instance_id,
- instance_id)
+ raise exception.InstanceNotFound(instance_id=instance_id)
return result
@@ -1032,6 +1044,7 @@ def instance_get_all(context):
options(joinedload('mac_addresses')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ips.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
all()
@@ -1045,6 +1058,7 @@ def instance_get_all_by_user(context, user_id):
options(joinedload('mac_addresses')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ips.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
filter_by(user_id=user_id).\
@@ -1117,7 +1131,7 @@ def instance_get_project_vpn(context, project_id):
options(joinedload('security_groups')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
- filter_by(image_id=FLAGS.vpn_image_id).\
+ filter_by(image_id=str(FLAGS.vpn_image_id)).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -1151,10 +1165,12 @@ def instance_get_fixed_addresses_v6(context, instance_id):
# compile of list of the mac_addresses sorted by network id
macs = [ref.mac_address for ref in
sorted(mac_refs, key=lambda ref: ref.network_id)]
- # combine prefixes and macs into (prefix,mac) pairs
- prefix_mac_pairs = zip(prefixes, macs)
+ # get project ids from instance
+ project_id = instance_ref.project_id
+ # combine prefixes, macs, and project_id into (prefix,mac,p_id) tuples
+ prefix_mac_tuples = zip(prefixes, macs, [project_id for m in macs])
# return list containing ipv6 address for each pair
- return [utils.to_global_ipv6(pair) for pair in prefix_mac_pairs]
+ return [ipv6.to_global_ipv6(*t) for t in prefix_mac_tuples]
@require_context
@@ -1171,13 +1187,6 @@ def instance_get_floating_address(context, instance_id):
@require_admin_context
-def instance_is_vpn(context, instance_id):
- # TODO(vish): Move this into image code somewhere
- instance_ref = instance_get(context, instance_id)
- return instance_ref['image_id'] == FLAGS.vpn_image_id
-
-
-@require_admin_context
def instance_set_state(context, instance_id, state, description=None):
# TODO(devcamcar): Move this out of models and into driver
from nova.compute import power_state
@@ -1316,8 +1325,7 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('no keypair for user %(user_id)s,'
- ' name %(name)s') % locals())
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@@ -1470,7 +1478,7 @@ def network_get(context, network_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('No network for id %s') % network_id)
+ raise exception.NetworkNotFound(network_id=network_id)
return result
@@ -1481,7 +1489,7 @@ def network_get_all(context):
result = session.query(models.Network).\
filter_by(deleted=False)
if not result:
- raise exception.NotFound(_('No networks defined'))
+ raise exception.NoNetworksFound()
return result
@@ -1510,7 +1518,7 @@ def network_get_by_bridge(context, bridge):
first()
if not result:
- raise exception.NotFound(_('No network for bridge %s') % bridge)
+ raise exception.NetworkNotFoundForBridge(bridge=bridge)
return result
@@ -1521,8 +1529,7 @@ def network_get_by_cidr(context, cidr):
filter_by(cidr=cidr).first()
if not result:
- raise exception.NotFound(_('Network with cidr %s does not exist') %
- cidr)
+ raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@@ -1538,7 +1545,7 @@ def network_get_by_instance(_context, instance_id):
filter_by(deleted=False).\
first()
if not rv:
- raise exception.NotFound(_('No network for instance %s') % instance_id)
+ raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return rv
@@ -1552,7 +1559,7 @@ def network_get_all_by_instance(_context, instance_id):
filter_by(deleted=False).\
all()
if not rv:
- raise exception.NotFound(_('No network for instance %s') % instance_id)
+ raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return rv
@@ -1576,7 +1583,7 @@ def network_set_host(context, network_id, host_id):
with_lockmode('update').\
first()
if not network_ref:
- raise exception.NotFound(_('No network for id %s') % network_id)
+ raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
@@ -1670,7 +1677,7 @@ def auth_token_get(context, token_hash, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not tk:
- raise exception.NotFound(_('Token %s does not exist') % token_hash)
+ raise exception.AuthTokenNotFound(token=token_hash)
return tk
@@ -1695,45 +1702,71 @@ def auth_token_create(_context, token):
@require_admin_context
-def quota_get(context, project_id, session=None):
+def quota_get(context, project_id, resource, session=None):
if not session:
session = get_session()
-
result = session.query(models.Quota).\
filter_by(project_id=project_id).\
- filter_by(deleted=can_read_deleted(context)).\
+ filter_by(resource=resource).\
+ filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('No quota for project_id %s') % project_id)
+ raise exception.ProjectQuotaNotFound(project_id=project_id)
+ return result
+
+@require_admin_context
+def quota_get_all_by_project(context, project_id):
+ session = get_session()
+ result = {'project_id': project_id}
+ rows = session.query(models.Quota).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ all()
+ for row in rows:
+ result[row.resource] = row.hard_limit
return result
@require_admin_context
-def quota_create(context, values):
+def quota_create(context, project_id, resource, limit):
quota_ref = models.Quota()
- quota_ref.update(values)
+ quota_ref.project_id = project_id
+ quota_ref.resource = resource
+ quota_ref.hard_limit = limit
quota_ref.save()
return quota_ref
@require_admin_context
-def quota_update(context, project_id, values):
+def quota_update(context, project_id, resource, limit):
session = get_session()
with session.begin():
- quota_ref = quota_get(context, project_id, session=session)
- quota_ref.update(values)
+ quota_ref = quota_get(context, project_id, resource, session=session)
+ quota_ref.hard_limit = limit
quota_ref.save(session=session)
@require_admin_context
-def quota_destroy(context, project_id):
+def quota_destroy(context, project_id, resource):
session = get_session()
with session.begin():
- quota_ref = quota_get(context, project_id, session=session)
+ quota_ref = quota_get(context, project_id, resource, session=session)
quota_ref.delete(session=session)
+@require_admin_context
+def quota_destroy_all_by_project(context, project_id):
+ session = get_session()
+ with session.begin():
+ quotas = session.query(models.Quota).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).\
+ all()
+ for quota_ref in quotas:
+ quota_ref.delete(session=session)
+
+
###################
@@ -1859,8 +1892,7 @@ def volume_get(context, volume_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.VolumeNotFound(_('Volume %s not found') % volume_id,
- volume_id)
+ raise exception.VolumeNotFound(volume_id=volume_id)
return result
@@ -1892,7 +1924,7 @@ def volume_get_all_by_instance(context, instance_id):
filter_by(deleted=False).\
all()
if not result:
- raise exception.NotFound(_('No volume for instance %s') % instance_id)
+ raise exception.VolumeNotFoundForInstance(instance_id=instance_id)
return result
@@ -1917,8 +1949,7 @@ def volume_get_instance(context, volume_id):
options(joinedload('instance')).\
first()
if not result:
- raise exception.VolumeNotFound(_('Volume %s not found') % volume_id,
- volume_id)
+ raise exception.VolumeNotFound(volume_id=volume_id)
return result.instance
@@ -1930,8 +1961,7 @@ def volume_get_shelf_and_blade(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
- raise exception.NotFound(_('No export device found for volume %s') %
- volume_id)
+ raise exception.ExportDeviceNotFoundForVolume(volume_id=volume_id)
return (result.shelf_id, result.blade_id)
@@ -1943,8 +1973,7 @@ def volume_get_iscsi_target_num(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
- raise exception.NotFound(_('No target id found for volume %s') %
- volume_id)
+ raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
return result.target_num
@@ -1988,8 +2017,8 @@ def security_group_get(context, security_group_id, session=None):
options(joinedload_all('rules')).\
first()
if not result:
- raise exception.NotFound(_("No security group with id %s") %
- security_group_id)
+ raise exception.SecurityGroupNotFound(
+ security_group_id=security_group_id)
return result
@@ -2004,9 +2033,8 @@ def security_group_get_by_name(context, project_id, group_name):
options(joinedload_all('instances')).\
first()
if not result:
- raise exception.NotFound(
- _('No security group named %(group_name)s'
- ' for project: %(project_id)s') % locals())
+ raise exception.SecurityGroupNotFoundForProject(project_id=project_id,
+ security_group_id=group_name)
return result
@@ -2107,8 +2135,8 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
filter_by(id=security_group_rule_id).\
first()
if not result:
- raise exception.NotFound(_("No secuity group rule with id %s") %
- security_group_rule_id)
+ raise exception.SecurityGroupNotFoundForRule(
+ rule_id=security_group_rule_id)
return result
@@ -2182,7 +2210,7 @@ def user_get(context, id, session=None):
first()
if not result:
- raise exception.NotFound(_('No user for id %s') % id)
+ raise exception.UserNotFound(user_id=id)
return result
@@ -2198,7 +2226,7 @@ def user_get_by_access_key(context, access_key, session=None):
first()
if not result:
- raise exception.NotFound(_('No user for access key %s') % access_key)
+ raise exception.AccessKeyNotFound(access_key=access_key)
return result
@@ -2330,7 +2358,7 @@ def project_get(context, id, session=None):
first()
if not result:
- raise exception.NotFound(_("No project with id %s") % id)
+ raise exception.ProjectNotFound(project_id=id)
return result
@@ -2351,7 +2379,7 @@ def project_get_by_user(context, user_id):
options(joinedload_all('projects')).\
first()
if not user:
- raise exception.NotFound(_('Invalid user_id %s') % user_id)
+ raise exception.UserNotFound(user_id=user_id)
return user.projects
@@ -2432,8 +2460,7 @@ def migration_get(context, id, session=None):
result = session.query(models.Migration).\
filter_by(id=id).first()
if not result:
- raise exception.NotFound(_("No migration found with id %s")
- % id)
+ raise exception.MigrationNotFound(migration_id=id)
return result
@@ -2444,8 +2471,8 @@ def migration_get_by_instance_and_status(context, instance_id, status):
filter_by(instance_id=instance_id).\
filter_by(status=status).first()
if not result:
- raise exception.NotFound(_("No migration found for instance "
- "%(instance_id)s with status %(status)s") % locals())
+ raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
+ status=status)
return result
@@ -2466,8 +2493,7 @@ def console_pool_get(context, pool_id):
filter_by(id=pool_id).\
first()
if not result:
- raise exception.NotFound(_("No console pool with id %(pool_id)s")
- % locals())
+ raise exception.ConsolePoolNotFound(pool_id=pool_id)
return result
@@ -2483,9 +2509,9 @@ def console_pool_get_by_host_type(context, compute_host, host,
options(joinedload('consoles')).\
first()
if not result:
- raise exception.NotFound(_('No console pool of type %(console_type)s '
- 'for compute host %(compute_host)s '
- 'on proxy host %(host)s') % locals())
+ raise exception.ConsolePoolNotFoundForHostType(host=host,
+ console_type=console_type,
+ compute_host=compute_host)
return result
@@ -2523,8 +2549,8 @@ def console_get_by_pool_instance(context, pool_id, instance_id):
options(joinedload('pool')).\
first()
if not result:
- raise exception.NotFound(_('No console for instance %(instance_id)s '
- 'in pool %(pool_id)s') % locals())
+ raise exception.ConsoleNotFoundInPoolForInstance(pool_id=pool_id,
+ instance_id=instance_id)
return result
@@ -2545,9 +2571,11 @@ def console_get(context, console_id, instance_id=None):
query = query.filter_by(instance_id=instance_id)
result = query.options(joinedload('pool')).first()
if not result:
- idesc = (_("on instance %s") % instance_id) if instance_id else ""
- raise exception.NotFound(_("No console with id %(console_id)s"
- " %(idesc)s") % locals())
+ if instance_id:
+ raise exception.ConsoleNotFoundForInstance(console_id=console_id,
+ instance_id=instance_id)
+ else:
+ raise exception.ConsoleNotFound(console_id=console_id)
return result
@@ -2586,7 +2614,7 @@ def instance_type_get_all(context, inactive=False):
inst_dict[i['name']] = dict(i)
return inst_dict
else:
- raise exception.NotFound
+ raise exception.NoInstanceTypesFound()
@require_context
@@ -2597,7 +2625,7 @@ def instance_type_get_by_id(context, id):
filter_by(id=id).\
first()
if not inst_type:
- raise exception.NotFound(_("No instance type with id %s") % id)
+ raise exception.InstanceTypeNotFound(instance_type=id)
else:
return dict(inst_type)
@@ -2610,7 +2638,7 @@ def instance_type_get_by_name(context, name):
filter_by(name=name).\
first()
if not inst_type:
- raise exception.NotFound(_("No instance type with name %s") % name)
+ raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return dict(inst_type)
@@ -2623,7 +2651,7 @@ def instance_type_get_by_flavor_id(context, id):
filter_by(flavorid=int(id)).\
first()
if not inst_type:
- raise exception.NotFound(_("No flavor with flavorid %s") % id)
+ raise exception.FlavorNotFound(flavor_id=id)
else:
return dict(inst_type)
@@ -2636,7 +2664,7 @@ def instance_type_destroy(context, name):
filter_by(name=name)
records = instance_type_ref.update(dict(deleted=True))
if records == 0:
- raise exception.NotFound
+ raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return instance_type_ref
@@ -2651,7 +2679,7 @@ def instance_type_purge(context, name):
filter_by(name=name)
records = instance_type_ref.delete()
if records == 0:
- raise exception.NotFound
+ raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return instance_type_ref
@@ -2672,7 +2700,7 @@ def zone_update(context, zone_id, values):
session = get_session()
zone = session.query(models.Zone).filter_by(id=zone_id).first()
if not zone:
- raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+ raise exception.ZoneNotFound(zone_id=zone_id)
zone.update(values)
zone.save()
return zone
@@ -2692,7 +2720,7 @@ def zone_get(context, zone_id):
session = get_session()
result = session.query(models.Zone).filter_by(id=zone_id).first()
if not result:
- raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+ raise exception.ZoneNotFound(zone_id=zone_id)
return result
@@ -2726,7 +2754,7 @@ def instance_metadata_delete(context, instance_id, key):
filter_by(instance_id=instance_id).\
filter_by(key=key).\
filter_by(deleted=False).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -2742,8 +2770,8 @@ def instance_metadata_get_item(context, instance_id, key):
first()
if not meta_result:
- raise exception.NotFound(_('Invalid metadata key for instance %s') %
- instance_id)
+ raise exception.InstanceMetadataNotFound(metadata_key=key,
+ instance_id=instance_id)
return meta_result
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
index 9e7ab3554..63bbaccc1 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py
@@ -17,15 +17,13 @@
# under the License.
## Table code mostly autogenerated by genmodel.py
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey
+from sqlalchemy import ForeignKeyConstraint, Integer, MetaData, String
+from sqlalchemy import Table, Text
from nova import log as logging
-
meta = MetaData()
-
auth_tokens = Table('auth_tokens', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
index 413536a59..9bb8a8ada 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py
@@ -16,15 +16,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey
+from sqlalchemy import Integer, MetaData, String, Table, Text
from nova import log as logging
-
meta = MetaData()
-
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
index 5ba7910f1..8e0de4d2b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
@@ -15,15 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
-
networks = Table('networks', meta,
Column('id', Integer(), primary_key=True, nullable=False),
)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
index ade981687..0abea374c 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
@@ -13,15 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
from nova import log as logging
-
meta = MetaData()
-
#
# New Tables
#
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
index 4cb07e0d8..a1a86e3b4 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
@@ -15,15 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
from nova import log as logging
-
meta = MetaData()
-
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
instances = Table('instances', meta,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
index 705fc8ff3..4627d3332 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
@@ -15,11 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
index 427934d53..6f2668040 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
@@ -13,15 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
-
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
index 5e2cb69d9..63999f6ff 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
@@ -13,15 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import api
-from nova import db
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
from nova import log as logging
-import datetime
-
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
index 4fda525f1..0f2d0079a 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
@@ -15,12 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
-from sqlalchemy import *
-from migrate import *
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
from nova import log as logging
-
meta = MetaData()
# Just for the ForeignKey and column creation to succeed, these are not the
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
index eb3066894..a5b80586e 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
@@ -14,12 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from sqlalchemy.sql import text
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
index 23ccccb4e..b2b0256d2 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
@@ -16,10 +16,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from migrate import *
+from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData
+from sqlalchemy import Table, Text
from nova import log as logging
-from sqlalchemy import *
-
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
index e87085668..10d250522 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
@@ -13,15 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
meta = MetaData()
-
# Table stub-definitions
# Just for the ForeignKey and column creation to succeed, these are not the
# actual definitions of instances or services.
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
index 3fb92e85c..7246839b7 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
@@ -15,11 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.from sqlalchemy import *
-from sqlalchemy import *
-from migrate import *
-
-from nova import log as logging
-
+from sqlalchemy import Column, Integer, MetaData, Table
meta = MetaData()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
index 334d1f255..62216be12 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
@@ -14,16 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import *
-from sqlalchemy.sql import text
-from migrate import *
-
+from sqlalchemy import Column, Integer, MetaData, String, Table
#from nova import log as logging
-
meta = MetaData()
-
c_instance_type = Column('instance_type',
String(length=255, convert_unicode=False,
assert_unicode=None, unicode_error=None,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
new file mode 100644
index 000000000..375760c84
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
@@ -0,0 +1,35 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Grid Dynamics
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, MetaData, Table
+
+meta = MetaData()
+
+c_auto_assigned = Column('auto_assigned', Boolean, default=False)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ floating_ips = Table('floating_ips',
+ meta,
+ autoload=True,
+ autoload_with=migrate_engine)
+
+ floating_ips.create_column(c_auto_assigned)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
new file mode 100644
index 000000000..a2d8192ca
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
@@ -0,0 +1,203 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+
+import datetime
+
+meta = MetaData()
+
+resources = [
+ 'instances',
+ 'cores',
+ 'volumes',
+ 'gigabytes',
+ 'floating_ips',
+ 'metadata_items',
+]
+
+
+def old_style_quotas_table(name):
+ return Table(name, meta,
+ Column('id', Integer(), primary_key=True),
+ Column('created_at', DateTime(),
+ default=datetime.datetime.utcnow),
+ Column('updated_at', DateTime(),
+ onupdate=datetime.datetime.utcnow),
+ Column('deleted_at', DateTime()),
+ Column('deleted', Boolean(), default=False),
+ Column('project_id',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('instances', Integer()),
+ Column('cores', Integer()),
+ Column('volumes', Integer()),
+ Column('gigabytes', Integer()),
+ Column('floating_ips', Integer()),
+ Column('metadata_items', Integer()),
+ )
+
+
+def new_style_quotas_table(name):
+ return Table(name, meta,
+ Column('id', Integer(), primary_key=True),
+ Column('created_at', DateTime(),
+ default=datetime.datetime.utcnow),
+ Column('updated_at', DateTime(),
+ onupdate=datetime.datetime.utcnow),
+ Column('deleted_at', DateTime()),
+ Column('deleted', Boolean(), default=False),
+ Column('project_id',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column('resource',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=False),
+ Column('hard_limit', Integer(), nullable=True),
+ )
+
+
+def existing_quotas_table(migrate_engine):
+ return Table('quotas', meta, autoload=True, autoload_with=migrate_engine)
+
+
+def _assert_no_duplicate_project_ids(quotas):
+ project_ids = set()
+ message = ('There are multiple active quotas for project "%s" '
+ '(among others, possibly). '
+ 'Please resolve all ambiguous quotas before '
+ 'reattempting the migration.')
+ for quota in quotas:
+ assert quota.project_id not in project_ids, message % quota.project_id
+ project_ids.add(quota.project_id)
+
+
+def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas):
+ """Ensure that there are no duplicate non-deleted quota entries."""
+ select = quotas.select().where(quotas.c.deleted == False)
+ results = migrate_engine.execute(select)
+ _assert_no_duplicate_project_ids(list(results))
+
+
+def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas):
+ """Ensure that there are no duplicate non-deleted quota entries."""
+ for resource in resources:
+ select = quotas.select().\
+ where(quotas.c.deleted == False).\
+ where(quotas.c.resource == resource)
+ results = migrate_engine.execute(select)
+ _assert_no_duplicate_project_ids(list(results))
+
+
+def convert_forward(migrate_engine, old_quotas, new_quotas):
+ quotas = list(migrate_engine.execute(old_quotas.select()))
+ for quota in quotas:
+ for resource in resources:
+ hard_limit = getattr(quota, resource)
+ if hard_limit is None:
+ continue
+ insert = new_quotas.insert().values(
+ created_at=quota.created_at,
+ updated_at=quota.updated_at,
+ deleted_at=quota.deleted_at,
+ deleted=quota.deleted,
+ project_id=quota.project_id,
+ resource=resource,
+ hard_limit=hard_limit)
+ migrate_engine.execute(insert)
+
+
+def earliest(date1, date2):
+ if date1 is None and date2 is None:
+ return None
+ if date1 is None:
+ return date2
+ if date2 is None:
+ return date1
+ if date1 < date2:
+ return date1
+ return date2
+
+
+def latest(date1, date2):
+ if date1 is None and date2 is None:
+ return None
+ if date1 is None:
+ return date2
+ if date2 is None:
+ return date1
+ if date1 > date2:
+ return date1
+ return date2
+
+
+def convert_backward(migrate_engine, old_quotas, new_quotas):
+ quotas = {}
+ for quota in migrate_engine.execute(new_quotas.select()):
+ if (quota.resource not in resources
+ or quota.hard_limit is None or quota.deleted):
+ continue
+        if quota.project_id not in quotas:
+ quotas[quota.project_id] = {
+ 'project_id': quota.project_id,
+ 'created_at': quota.created_at,
+ 'updated_at': quota.updated_at,
+ quota.resource: quota.hard_limit
+ }
+ else:
+ quotas[quota.project_id]['created_at'] = earliest(
+ quota.created_at, quotas[quota.project_id]['created_at'])
+ quotas[quota.project_id]['updated_at'] = latest(
+ quota.updated_at, quotas[quota.project_id]['updated_at'])
+ quotas[quota.project_id][quota.resource] = quota.hard_limit
+
+ for quota in quotas.itervalues():
+ insert = old_quotas.insert().values(**quota)
+ migrate_engine.execute(insert)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ old_quotas = existing_quotas_table(migrate_engine)
+ assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas)
+
+ new_quotas = new_style_quotas_table('quotas_new')
+ new_quotas.create()
+ convert_forward(migrate_engine, old_quotas, new_quotas)
+ old_quotas.drop()
+ new_quotas.rename('quotas')
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+ meta.bind = migrate_engine
+
+ new_quotas = existing_quotas_table(migrate_engine)
+ assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas)
+
+ old_quotas = old_style_quotas_table('quotas_old')
+ old_quotas.create()
+ convert_backward(migrate_engine, old_quotas, new_quotas)
+ new_quotas.drop()
+ old_quotas.rename('quotas')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py
new file mode 100644
index 000000000..cda890c94
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py
@@ -0,0 +1,68 @@
+from sqlalchemy import Column, Integer, MetaData, String, Table
+from nova import log as logging
+
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ types = {}
+ for instance in migrate_engine.execute(instances.select()):
+ if instance.instance_type_id is None:
+ types[instance.id] = None
+ continue
+ try:
+ types[instance.id] = int(instance.instance_type_id)
+ except ValueError:
+ logging.warn("Instance %s did not have instance_type_id "
+ "converted to an integer because its value is %s" %
+ (instance.id, instance.instance_type_id))
+ types[instance.id] = None
+
+ integer_column = Column('instance_type_id_int', Integer(), nullable=True)
+ string_column = instances.c.instance_type_id
+
+ integer_column.create(instances)
+ for instance_id, instance_type_id in types.iteritems():
+ update = instances.update().\
+ where(instances.c.id == instance_id).\
+ values(instance_type_id_int=instance_type_id)
+ migrate_engine.execute(update)
+
+ string_column.alter(name='instance_type_id_str')
+ integer_column.alter(name='instance_type_id')
+ string_column.drop()
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ integer_column = instances.c.instance_type_id
+ string_column = Column('instance_type_id_str',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+ types = {}
+ for instance in migrate_engine.execute(instances.select()):
+ if instance.instance_type_id is None:
+ types[instance.id] = None
+ else:
+ types[instance.id] = str(instance.instance_type_id)
+
+ string_column.create(instances)
+ for instance_id, instance_type_id in types.iteritems():
+ update = instances.update().\
+ where(instances.c.id == instance_id).\
+ values(instance_type_id_str=instance_type_id)
+ migrate_engine.execute(update)
+
+ integer_column.alter(name='instance_type_id_int')
+ string_column.alter(name='instance_type_id')
+ integer_column.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
new file mode 100644
index 000000000..a169afb40
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+#from nova import log as logging
+
+meta = MetaData()
+
+c_manageent = Column('server_manageent_url',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+c_management = Column('server_management_url',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ tokens = Table('auth_tokens', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ tokens.create_column(c_management)
+ migrate_engine.execute(tokens.update()
+ .values(server_management_url=tokens.c.server_manageent_url))
+
+ tokens.c.server_manageent_url.drop()
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ tokens = Table('auth_tokens', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ tokens.create_column(c_manageent)
+ migrate_engine.execute(tokens.update()
+ .values(server_manageent_url=tokens.c.server_management_url))
+
+ tokens.c.server_management_url.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_multi_nic.py b/nova/db/sqlalchemy/migrate_repo/versions/019_multi_nic.py
index 54a70d23c..54a70d23c 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/015_multi_nic.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/019_multi_nic.py
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 4bc9ef373..322414bec 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -217,7 +217,7 @@ class Instance(BASE, NovaBase):
host = Column(String(255)) # , ForeignKey('hosts.id'))
# aka flavor_id
- instance_type_id = Column(String(255))
+ instance_type_id = Column(Integer)
user_data = Column(Text)
@@ -320,18 +320,20 @@ class Volume(BASE, NovaBase):
class Quota(BASE, NovaBase):
- """Represents quota overrides for a project."""
+ """Represents a single quota override for a project.
+
+ If there is no row for a given project id and resource, then
+ the default for the deployment is used. If the row is present
+ but the hard limit is Null, then the resource is unlimited.
+ """
+
__tablename__ = 'quotas'
id = Column(Integer, primary_key=True)
- project_id = Column(String(255))
+ project_id = Column(String(255), index=True)
- instances = Column(Integer)
- cores = Column(Integer)
- volumes = Column(Integer)
- gigabytes = Column(Integer)
- floating_ips = Column(Integer)
- metadata_items = Column(Integer)
+ resource = Column(String(255))
+ hard_limit = Column(Integer, nullable=True)
class ExportDevice(BASE, NovaBase):
@@ -525,6 +527,7 @@ class FloatingIp(BASE, NovaBase):
'FloatingIp.deleted == False)')
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
+ auto_assigned = Column(Boolean, default=False, nullable=False)
class MacAddress(BASE, NovaBase):
diff --git a/nova/exception.py b/nova/exception.py
index 072663435..020243220 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -49,53 +49,17 @@ class Error(Exception):
class ApiError(Error):
- def __init__(self, message='Unknown', code='ApiError'):
- self.message = message
+ def __init__(self, message='Unknown', code=None):
+ self.msg = message
self.code = code
- super(ApiError, self).__init__('%s: %s' % (code, message))
+ if code:
+ outstr = '%s: %s' % (code, message)
+ else:
+ outstr = '%s' % message
+ super(ApiError, self).__init__(outstr)
-class NotFound(Error):
- pass
-
-
-class InstanceNotFound(NotFound):
- def __init__(self, message, instance_id):
- self.instance_id = instance_id
- super(InstanceNotFound, self).__init__(message)
-
-
-class VolumeNotFound(NotFound):
- def __init__(self, message, volume_id):
- self.volume_id = volume_id
- super(VolumeNotFound, self).__init__(message)
-
-
-class Duplicate(Error):
- pass
-
-
-class NotAuthorized(Error):
- pass
-
-
-class NotEmpty(Error):
- pass
-
-
-class Invalid(Error):
- pass
-
-
-class InvalidInputException(Error):
- pass
-
-
-class InvalidContentType(Error):
- pass
-
-
-class TimeoutException(Error):
+class BuildInProgress(Error):
pass
@@ -134,3 +98,477 @@ def wrap_exception(f):
class MacAddress(Error):
pass
+
+
+class NovaException(Exception):
+ """Base Nova Exception
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ """
+ message = _("An unknown exception occurred.")
+
+ def __init__(self, **kwargs):
+ try:
+ self._error_string = self.message % kwargs
+
+ except Exception:
+ # at least get the core message out if something happened
+ self._error_string = self.message
+
+ def __str__(self):
+ return self._error_string
+
+
+class NotAuthorized(NovaException):
+ message = _("Not authorized.")
+
+ def __init__(self, *args, **kwargs):
+ super(NotAuthorized, self).__init__(**kwargs)
+
+
+class AdminRequired(NotAuthorized):
+ message = _("User does not have admin privileges")
+
+
+class Invalid(NovaException):
+ message = _("Unacceptable parameters.")
+
+
+class InvalidSignature(Invalid):
+ message = _("Invalid signature %(signature)s for user %(user)s.")
+
+
+class InvalidInput(Invalid):
+ message = _("Invalid input received") + ": %(reason)s"
+
+
+class InvalidInstanceType(Invalid):
+ message = _("Invalid instance type %(instance_type)s.")
+
+
+class InvalidPortRange(Invalid):
+ message = _("Invalid port range %(from_port)s:%(to_port)s.")
+
+
+class InvalidIpProtocol(Invalid):
+ message = _("Invalid IP protocol %(protocol)s.")
+
+
+class InvalidContentType(Invalid):
+ message = _("Invalid content type %(content_type)s.")
+
+
+class InstanceNotRunning(Invalid):
+ message = _("Instance %(instance_id)s is not running.")
+
+
+class InstanceNotSuspended(Invalid):
+ message = _("Instance %(instance_id)s is not suspended.")
+
+
+class InstanceNotInRescueMode(Invalid):
+ message = _("Instance %(instance_id)s is not in rescue mode")
+
+
+class InstanceSuspendFailure(Invalid):
+ message = _("Failed to suspend instance") + ": %(reason)s"
+
+
+class InstanceResumeFailure(Invalid):
+ message = _("Failed to resume server") + ": %(reason)s."
+
+
+class InstanceRebootFailure(Invalid):
+ message = _("Failed to reboot instance") + ": %(reason)s"
+
+
+class ServiceUnavailable(Invalid):
+ message = _("Service is unavailable at this time.")
+
+
+class VolumeServiceUnavailable(ServiceUnavailable):
+ message = _("Volume service is unavailable at this time.")
+
+
+class ComputeServiceUnavailable(ServiceUnavailable):
+ message = _("Compute service is unavailable at this time.")
+
+
+class UnableToMigrateToSelf(Invalid):
+ message = _("Unable to migrate instance (%(instance_id)s) "
+ "to current host (%(host)s).")
+
+
+class SourceHostUnavailable(Invalid):
+ message = _("Original compute host is unavailable at this time.")
+
+
+class InvalidHypervisorType(Invalid):
+ message = _("The supplied hypervisor type is invalid.")
+
+
+class DestinationHypervisorTooOld(Invalid):
+ message = _("The instance requires a newer hypervisor version than "
+ "has been provided.")
+
+
+class InvalidDevicePath(Invalid):
+ message = _("The supplied device path (%(path)s) is invalid.")
+
+
+class InvalidCPUInfo(Invalid):
+ message = _("Unacceptable CPU info") + ": %(reason)s"
+
+
+class InvalidVLANTag(Invalid):
+ message = _("VLAN tag is not appropriate for the port group "
+ "%(bridge)s. Expected VLAN tag is %(tag)s, "
+ "but the one associated with the port group is %(pgroup)s.")
+
+
+class InvalidVLANPortGroup(Invalid):
+ message = _("vSwitch which contains the port group %(bridge)s is "
+ "not associated with the desired physical adapter. "
+ "Expected vSwitch is %(expected)s, but the one associated "
+ "is %(actual)s.")
+
+
+class InvalidDiskFormat(Invalid):
+ message = _("Disk format %(disk_format)s is not acceptable")
+
+
+class ImageUnacceptable(Invalid):
+ message = _("Image %(image_id)s is unacceptable") + ": %(reason)s"
+
+
+class InstanceUnacceptable(Invalid):
+ message = _("Instance %(instance_id)s is unacceptable") + ": %(reason)s"
+
+
+class InvalidEc2Id(Invalid):
+ message = _("Ec2 id %(ec2_id)s is unacceptable.")
+
+
+class NotFound(NovaException):
+ message = _("Resource could not be found.")
+
+ def __init__(self, *args, **kwargs):
+ super(NotFound, self).__init__(**kwargs)
+
+
+class InstanceNotFound(NotFound):
+ message = _("Instance %(instance_id)s could not be found.")
+
+
+class VolumeNotFound(NotFound):
+ message = _("Volume %(volume_id)s could not be found.")
+
+
+class VolumeNotFoundForInstance(VolumeNotFound):
+ message = _("Volume not found for instance %(instance_id)s.")
+
+
+class ExportDeviceNotFoundForVolume(NotFound):
+ message = _("No export device found for volume %(volume_id)s.")
+
+
+class ISCSITargetNotFoundForVolume(NotFound):
+ message = _("No target id found for volume %(volume_id)s.")
+
+
+class DiskNotFound(NotFound):
+ message = _("No disk at %(location)s")
+
+
+class ImageNotFound(NotFound):
+ message = _("Image %(image_id)s could not be found.")
+
+
+class KernelNotFoundForImage(ImageNotFound):
+ message = _("Kernel not found for image %(image_id)s.")
+
+
+class RamdiskNotFoundForImage(ImageNotFound):
+ message = _("Ramdisk not found for image %(image_id)s.")
+
+
+class UserNotFound(NotFound):
+ message = _("User %(user_id)s could not be found.")
+
+
+class ProjectNotFound(NotFound):
+ message = _("Project %(project_id)s could not be found.")
+
+
+class ProjectMembershipNotFound(NotFound):
+ message = _("User %(user_id)s is not a member of project %(project_id)s.")
+
+
+class UserRoleNotFound(NotFound):
+ message = _("Role %(role_id)s could not be found.")
+
+
+class StorageRepositoryNotFound(NotFound):
+ message = _("Cannot find SR to read/write VDI.")
+
+
+class NetworkNotFound(NotFound):
+ message = _("Network %(network_id)s could not be found.")
+
+
+class NetworkNotFoundForBridge(NetworkNotFound):
+ message = _("Network could not be found for bridge %(bridge)s")
+
+
+class NetworkNotFoundForCidr(NetworkNotFound):
+ message = _("Network could not be found with cidr %(cidr)s.")
+
+
+class NetworkNotFoundForInstance(NetworkNotFound):
+ message = _("Network could not be found for instance %(instance_id)s.")
+
+
+class NoNetworksFound(NotFound):
+ message = _("No networks defined.")
+
+
+class DatastoreNotFound(NotFound):
+ message = _("Could not find the datastore reference(s) which the VM uses.")
+
+
+class NoFixedIpsFoundForInstance(NotFound):
+ message = _("Instance %(instance_id)s has zero fixed ips.")
+
+
+class NoFixedIpsFoundForMacAddress(NotFound):
+ message = _("Mac Address %(mac_id)s has zero associated fixed ips.")
+
+
+class NoFixedIpFound(NotFound):
+ message = _("No fixed IP associated with address %(address)s.")
+
+
+class NoFixedIpsDefined(NotFound):
+ message = _("Zero fixed ips could be found.")
+
+
+class NoFixedIpsDefinedForHost(NotFound):
+ message = _("Zero fixed ips defined for host %(host)s.")
+
+
+class FloatingIpNotFound(NotFound):
+ message = _("Floating ip not found for address %(address)s.")
+
+
+class NoFloatingIpsDefined(NotFound):
+ message = _("Zero floating ips could be found.")
+
+
+class NoFloatingIpsDefinedForHost(NoFloatingIpsDefined):
+ message = _("Zero floating ips defined for host %(host)s.")
+
+
+class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined):
+ message = _("Zero floating ips defined for instance %(instance_id)s.")
+
+
+class KeypairNotFound(NotFound):
+ message = _("Keypair %(keypair_name)s not found for user %(user_id)s")
+
+
+class CertificateNotFound(NotFound):
+ message = _("Certificate %(certificate_id)s not found.")
+
+
+class ServiceNotFound(NotFound):
+ message = _("Service %(service_id)s could not be found.")
+
+
+class HostNotFound(NotFound):
+ message = _("Host %(host)s could not be found.")
+
+
+class ComputeHostNotFound(HostNotFound):
+ message = _("Compute host %(host)s could not be found.")
+
+
+class HostBinaryNotFound(NotFound):
+ message = _("Could not find binary %(binary)s on host %(host)s.")
+
+
+class AuthTokenNotFound(NotFound):
+ message = _("Auth token %(token)s could not be found.")
+
+
+class AccessKeyNotFound(NotFound):
+ message = _("Access Key %(access_key)s could not be found.")
+
+
+class QuotaNotFound(NotFound):
+ message = _("Quota could not be found")
+
+
+class ProjectQuotaNotFound(QuotaNotFound):
+ message = _("Quota for project %(project_id)s could not be found.")
+
+
+class SecurityGroupNotFound(NotFound):
+ message = _("Security group %(security_group_id)s not found.")
+
+
+class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
+ message = _("Security group %(security_group_id)s not found "
+ "for project %(project_id)s.")
+
+
+class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
+ message = _("Security group with rule %(rule_id)s not found.")
+
+
+class MigrationNotFound(NotFound):
+ message = _("Migration %(migration_id)s could not be found.")
+
+
+class MigrationNotFoundByStatus(MigrationNotFound):
+ message = _("Migration not found for instance %(instance_id)s "
+ "with status %(status)s.")
+
+
+class ConsolePoolNotFound(NotFound):
+ message = _("Console pool %(pool_id)s could not be found.")
+
+
+class ConsolePoolNotFoundForHostType(NotFound):
+ message = _("Console pool of type %(console_type)s "
+ "for compute host %(compute_host)s "
+ "on proxy host %(host)s not found.")
+
+
+class ConsoleNotFound(NotFound):
+ message = _("Console %(console_id)s could not be found.")
+
+
+class ConsoleNotFoundForInstance(ConsoleNotFound):
+ message = _("Console for instance %(instance_id)s could not be found.")
+
+
+class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
+ message = _("Console for instance %(instance_id)s "
+ "in pool %(pool_id)s could not be found.")
+
+
+class NoInstanceTypesFound(NotFound):
+ message = _("Zero instance types found.")
+
+
+class InstanceTypeNotFound(NotFound):
+ message = _("Instance type %(instance_type_id)s could not be found.")
+
+
+class InstanceTypeNotFoundByName(InstanceTypeNotFound):
+ message = _("Instance type with name %(instance_type_name)s "
+ "could not be found.")
+
+
+class FlavorNotFound(NotFound):
+ message = _("Flavor %(flavor_id)s could not be found.")
+
+
+class ZoneNotFound(NotFound):
+ message = _("Zone %(zone_id)s could not be found.")
+
+
+class SchedulerHostFilterDriverNotFound(NotFound):
+ message = _("Scheduler Host Filter Driver %(driver_name)s could"
+ " not be found.")
+
+
+class InstanceMetadataNotFound(NotFound):
+ message = _("Instance %(instance_id)s has no metadata with "
+ "key %(metadata_key)s.")
+
+
+class LDAPObjectNotFound(NotFound):
+ message = _("LDAP object could not be found")
+
+
+class LDAPUserNotFound(LDAPObjectNotFound):
+ message = _("LDAP user %(user_id)s could not be found.")
+
+
+class LDAPGroupNotFound(LDAPObjectNotFound):
+ message = _("LDAP group %(group_id)s could not be found.")
+
+
+class LDAPGroupMembershipNotFound(NotFound):
+ message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.")
+
+
+class FileNotFound(NotFound):
+ message = _("File %(file_path)s could not be found.")
+
+
+class NoFilesFound(NotFound):
+ message = _("Zero files could be found.")
+
+
+class SwitchNotFoundForNetworkAdapter(NotFound):
+ message = _("Virtual switch associated with the "
+ "network adapter %(adapter)s not found.")
+
+
+class NetworkAdapterNotFound(NotFound):
+ message = _("Network adapter %(adapter)s could not be found.")
+
+
+class ClassNotFound(NotFound):
+ message = _("Class %(class_name)s could not be found")
+
+
+class NotAllowed(NovaException):
+ message = _("Action not allowed.")
+
+
+class GlobalRoleNotAllowed(NotAllowed):
+ message = _("Unable to use global role %(role_id)s")
+
+
+#TODO(bcwaldon): EOL this exception!
+class Duplicate(NovaException):
+ pass
+
+
+class KeyPairExists(Duplicate):
+ message = _("Key pair %(key_name)s already exists.")
+
+
+class UserExists(Duplicate):
+ message = _("User %(user)s already exists.")
+
+
+class LDAPUserExists(UserExists):
+ message = _("LDAP user %(user)s already exists.")
+
+
+class LDAPGroupExists(Duplicate):
+ message = _("LDAP group %(group)s already exists.")
+
+
+class LDAPMembershipExists(Duplicate):
+ message = _("User %(uid)s is already a member of "
+ "the group %(group_dn)s")
+
+
+class ProjectExists(Duplicate):
+ message = _("Project %(project)s already exists.")
+
+
+class InstanceExists(Duplicate):
+ message = _("Instance %(name)s already exists.")
+
+
+class MigrationError(NovaException):
+ message = _("Migration error") + ": %(reason)s"
diff --git a/nova/flags.py b/nova/flags.py
index d1b93f0a8..32cb6efa8 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -324,7 +324,7 @@ DEFINE_string('null_kernel', 'nokernel',
'kernel image that indicates not to use a kernel,'
' but to use a raw disk image instead')
-DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server')
+DEFINE_integer('vpn_image_id', 0, 'integer id for cloudpipe vpn server')
DEFINE_string('vpn_key_suffix',
'-vpn',
'Suffix to add to project name for vpn key and secgroups')
@@ -369,6 +369,12 @@ DEFINE_string('host', socket.gethostname(),
DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
+DEFINE_string('notification_driver',
+ 'nova.notifier.no_op_notifier',
+ 'Default driver for sending notifications')
+DEFINE_list('memcached_servers', None,
+ 'Memcached servers or None for in process cache.')
+
DEFINE_string('zone_name', 'nova', 'name of this zone')
DEFINE_list('zone_capabilities',
['hypervisor=xenserver;kvm', 'os=linux;windows'],
diff --git a/nova/image/fake.py b/nova/image/fake.py
index 3bc2a8287..b400b2adb 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -72,7 +72,7 @@ class FakeImageService(service.BaseImageService):
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
def create(self, context, data):
"""Store the image data and return the new image id.
@@ -89,24 +89,24 @@ class FakeImageService(service.BaseImageService):
def update(self, context, image_id, data):
"""Replace the contents of the given image with the new data.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
image_id = int(image_id)
if not self.images.get(image_id):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
self.images[image_id] = copy.deepcopy(data)
def delete(self, context, image_id):
"""Delete the given image.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
image_id = int(image_id)
removed = self.images.pop(image_id, None)
if not removed:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
def delete_all(self):
"""Clears out all images."""
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 81661b3b0..193e37273 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -86,10 +86,10 @@ class GlanceImageService(service.BaseImageService):
try:
image_meta = self.client.get_image_meta(image_id)
except glance_exception.NotFound:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
if not self._is_image_available(context, image_meta):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
base_image_meta = self._translate_to_base(image_meta)
return base_image_meta
@@ -102,14 +102,14 @@ class GlanceImageService(service.BaseImageService):
for image_meta in image_metas:
if name == image_meta.get('name'):
return image_meta
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=name)
def get(self, context, image_id, data):
"""Calls out to Glance for metadata and data and writes data."""
try:
image_meta, image_chunks = self.client.get_image(image_id)
except glance_exception.NotFound:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
for chunk in image_chunks:
data.write(chunk)
@@ -142,7 +142,7 @@ class GlanceImageService(service.BaseImageService):
def update(self, context, image_id, image_meta, data=None):
"""Replace the contents of the given image with the new data.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
@@ -150,7 +150,7 @@ class GlanceImageService(service.BaseImageService):
try:
image_meta = self.client.update_image(image_id, image_meta, data)
except glance_exception.NotFound:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
base_image_meta = self._translate_to_base(image_meta)
return base_image_meta
@@ -158,7 +158,7 @@ class GlanceImageService(service.BaseImageService):
def delete(self, context, image_id):
"""Delete the given image.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
@@ -166,7 +166,7 @@ class GlanceImageService(service.BaseImageService):
try:
result = self.client.delete_image(image_id)
except glance_exception.NotFound:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
return result
def delete_all(self):
diff --git a/nova/image/local.py b/nova/image/local.py
index 50f00bee1..918180bae 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -86,10 +86,10 @@ class LocalImageService(service.BaseImageService):
with open(self._path_to(image_id)) as metadata_file:
image_meta = json.load(metadata_file)
if not self._is_image_available(context, image_meta):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
return image_meta
except (IOError, ValueError):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
def show_by_name(self, context, name):
"""Returns a dict containing image data for the given name."""
@@ -102,7 +102,7 @@ class LocalImageService(service.BaseImageService):
image = cantidate
break
if image is None:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=name)
return image
def get(self, context, image_id, data):
@@ -113,7 +113,7 @@ class LocalImageService(service.BaseImageService):
with open(self._path_to(image_id, 'image')) as image_file:
shutil.copyfileobj(image_file, data)
except (IOError, ValueError):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
return metadata
def create(self, context, metadata, data=None):
@@ -143,13 +143,13 @@ class LocalImageService(service.BaseImageService):
with open(self._path_to(image_id), 'w') as metadata_file:
json.dump(metadata, metadata_file)
except (IOError, ValueError):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
return metadata
def delete(self, context, image_id):
"""Delete the given image.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
@@ -157,7 +157,7 @@ class LocalImageService(service.BaseImageService):
try:
shutil.rmtree(self._path_to(image_id, None))
except (IOError, ValueError):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
def delete_all(self):
"""Clears out all images in local directory."""
diff --git a/nova/ipv6/__init__.py b/nova/ipv6/__init__.py
new file mode 100644
index 000000000..da4567cfb
--- /dev/null
+++ b/nova/ipv6/__init__.py
@@ -0,0 +1,17 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Openstack, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.ipv6.api import *
diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py
new file mode 100644
index 000000000..258678f0a
--- /dev/null
+++ b/nova/ipv6/account_identifier.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""IPv6 address generation with account identifier embedded"""
+
+import hashlib
+import netaddr
+
+
+def to_global(prefix, mac, project_id):
+ project_hash = netaddr.IPAddress(int(hashlib.sha1(project_id).\
+ hexdigest()[:8], 16) << 32)
+ static_num = netaddr.IPAddress(0xff << 24)
+
+ try:
+ mac_suffix = netaddr.EUI(mac).words[3:]
+ int_addr = int(''.join(['%02x' % i for i in mac_suffix]), 16)
+ mac_addr = netaddr.IPAddress(int_addr)
+ maskIP = netaddr.IPNetwork(prefix).ip
+ return (project_hash ^ static_num ^ mac_addr | maskIP).format()
+ except TypeError:
+ raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
+
+
+def to_mac(ipv6_address):
+ address = netaddr.IPAddress(ipv6_address)
+ mask1 = netaddr.IPAddress('::ff:ffff')
+ mac = netaddr.EUI(int(address & mask1)).words
+ return ':'.join(['02', '16', '3e'] + ['%02x' % i for i in mac[3:6]])
diff --git a/nova/ipv6/api.py b/nova/ipv6/api.py
new file mode 100644
index 000000000..da003645a
--- /dev/null
+++ b/nova/ipv6/api.py
@@ -0,0 +1,41 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Openstack, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import flags
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('ipv6_backend',
+ 'rfc2462',
+ 'Backend to use for IPv6 generation')
+
+
+def reset_backend():
+ global IMPL
+ IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'],
+ rfc2462='nova.ipv6.rfc2462',
+ account_identifier='nova.ipv6.account_identifier')
+
+
+def to_global(prefix, mac, project_id):
+ return IMPL.to_global(prefix, mac, project_id)
+
+
+def to_mac(ipv6_address):
+ return IMPL.to_mac(ipv6_address)
+
+reset_backend()
diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py
new file mode 100644
index 000000000..0074efe98
--- /dev/null
+++ b/nova/ipv6/rfc2462.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""RFC2462 style IPv6 address generation"""
+
+import netaddr
+
+
+def to_global(prefix, mac, project_id):
+ try:
+ mac64 = netaddr.EUI(mac).eui64().words
+ int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
+ mac64_addr = netaddr.IPAddress(int_addr)
+ maskIP = netaddr.IPNetwork(prefix).ip
+ return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
+ format()
+ except TypeError:
+ raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
+
+
+def to_mac(ipv6_address):
+ address = netaddr.IPAddress(ipv6_address)
+ mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff')
+ mask2 = netaddr.IPAddress('::0200:0:0:0')
+ mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words
+ return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]])
diff --git a/nova/network/api.py b/nova/network/api.py
index 78d39cd65..44b3c802b 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -16,9 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Handles all requests relating to instances (guest vms).
-"""
+"""Handles all requests relating to instances (guest vms)."""
import pickle
@@ -26,10 +24,10 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
-from nova import quota
from nova import rpc
from nova.db import base
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.network')
@@ -38,33 +36,31 @@ class API(base.Base):
"""API for interacting with the network manager."""
def allocate_floating_ip(self, context):
- if quota.allowed_floating_ips(context, 1) < 1:
- LOG.warn(_("Quota exceeeded for %s, tried to allocate "
- "address"),
- context.project_id)
- raise quota.QuotaError(_("Address quota exceeded. You cannot "
- "allocate any more addresses"))
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
# at some point.
return rpc.call(context,
FLAGS.network_topic,
- {"method": "allocate_floating_ip",
- "args": {"project_id": context.project_id}})
+ {'method': 'allocate_floating_ip',
+ 'args': {'project_id': context.project_id}})
- def release_floating_ip(self, context, address):
+ def release_floating_ip(self, context, address,
+ affect_auto_assigned=False):
floating_ip = self.db.floating_ip_get_by_address(context, address)
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ return
# NOTE(vish): We don't know which network host should get the ip
# when we deallocate, so just send it to any one. This
# will probably need to move into a network supervisor
# at some point.
rpc.cast(context,
FLAGS.network_topic,
- {"method": "deallocate_floating_ip",
- "args": {"floating_address": floating_ip['address']}})
+ {'method': 'deallocate_floating_ip',
+ 'args': {'floating_address': floating_ip['address']}})
- def associate_floating_ip(self, context, floating_ip, fixed_ip):
+ def associate_floating_ip(self, context, floating_ip, fixed_ip,
+ affect_auto_assigned=False):
"""rpc.casts to network associate_floating_ip
fixed_ip is either a fixed_ip object or a string fixed ip address
@@ -73,19 +69,21 @@ class API(base.Base):
if isinstance(fixed_ip, basestring):
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ return
# Check if the floating ip address is allocated
if floating_ip['project_id'] is None:
- raise exception.ApiError(_("Address (%s) is not allocated") %
+ raise exception.ApiError(_('Address (%s) is not allocated') %
floating_ip['address'])
# Check if the floating ip address is allocated to the same project
if floating_ip['project_id'] != context.project_id:
- LOG.warn(_("Address (%(address)s) is not allocated to your "
- "project (%(project)s)"),
+ LOG.warn(_('Address (%(address)s) is not allocated to your '
+ 'project (%(project)s)'),
{'address': floating_ip['address'],
'project': context.project_id})
- raise exception.ApiError(_("Address (%(address)s) is not "
- "allocated to your project"
- "(%(project)s)") %
+ raise exception.ApiError(_('Address (%(address)s) is not '
+ 'allocated to your project'
+ '(%(project)s)') %
{'address': floating_ip['address'],
'project': context.project_id})
# NOTE(vish): Perhaps we should just pass this on to compute and
@@ -93,47 +91,48 @@ class API(base.Base):
host = fixed_ip['network']['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
- {"method": "associate_floating_ip",
- "args": {"floating_address": floating_ip['address'],
- "fixed_address": fixed_ip['address']}})
+ {'method': 'associate_floating_ip',
+ 'args': {'floating_address': floating_ip['address'],
+ 'fixed_address': fixed_ip['address']}})
- def disassociate_floating_ip(self, context, address):
+ def disassociate_floating_ip(self, context, address,
+ affect_auto_assigned=False):
floating_ip = self.db.floating_ip_get_by_address(context, address)
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ return
if not floating_ip.get('fixed_ip'):
raise exception.ApiError('Address is not associated.')
host = floating_ip['host']
rpc.call(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": floating_ip['address']}})
+ {'method': 'disassociate_floating_ip',
+ 'args': {'floating_address': floating_ip['address']}})
def allocate_for_instance(self, context, instance, **kwargs):
"""rpc.calls network manager allocate_for_instance
- handles args and return value serialization
+ returns network info
"""
args = kwargs
- args['instance'] = pickle.dumps(instance)
- rval = rpc.call(context, FLAGS.network_topic,
+ args['instance_id'] = instance['id']
+ args['project_id'] = instance['project_id']
+ args['instance_type_id'] = instance['instance_type_id']
+ return rpc.call(context, FLAGS.network_topic,
{'method': 'allocate_for_instance',
'args': args})
- return pickle.loads(rval)
def deallocate_for_instance(self, context, instance, **kwargs):
- """rpc.casts network manager allocate_for_instance
- handles argument serialization
- """
+ """rpc.casts network manager allocate_for_instance"""
args = kwargs
- args['instance'] = pickle.dumps(instance)
+ args['instance_id'] = instance['id']
+ args['project_id'] = instance['project_id']
rpc.cast(context, FLAGS.network_topic,
{'method': 'deallocate_for_instance',
'args': args})
def get_instance_nw_info(self, context, instance):
- """rpc.calls network manager get_instance_nw_info
- handles the args and return value serialization
- """
- args = {'instance': pickle.dumps(instance)}
- rval = rpc.call(context, FLAGS.network_topic,
+ """rpc.calls network manager get_instance_nw_info"""
+ args = {'instance_id': instance['id'],
+ 'instance_type_id': instance['instance_type_id']}
+ return rpc.call(context, FLAGS.network_topic,
{'method': 'get_instance_nw_info',
'args': args})
- return pickle.loads(rval)
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 8ee779422..51bd6f2b2 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -15,13 +15,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Implements vlans, bridges, and iptables rules using linux utilities.
-"""
+"""Implements vlans, bridges, and iptables rules using linux utilities."""
+
+import calendar
import inspect
import os
-import calendar
from nova import db
from nova import exception
@@ -29,12 +28,13 @@ from nova import flags
from nova import log as logging
from nova import utils
+
LOG = logging.getLogger("nova.linux_net")
def _bin_file(script):
- """Return the absolute path to scipt in the bin directory"""
- return os.path.abspath(os.path.join(__file__, "../../../bin", script))
+    """Return the absolute path to script in the bin directory."""
+ return os.path.abspath(os.path.join(__file__, '../../../bin', script))
FLAGS = flags.FLAGS
@@ -56,22 +56,23 @@ flags.DEFINE_string('input_chain', 'INPUT',
'chain to add nova_input to')
flags.DEFINE_integer('dhcp_lease_time', 120,
'Lifetime of a DHCP lease')
-
flags.DEFINE_string('dns_server', None,
'if set, uses specific dns server for dnsmasq')
flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
'dmz range that should be accepted')
-
-
+flags.DEFINE_string('dnsmasq_config_file', "",
+ 'Override the default dnsmasq settings with this file')
binary_name = os.path.basename(inspect.stack()[-1][1])
class IptablesRule(object):
- """An iptables rule
+ """An iptables rule.
You shouldn't need to use this class directly, it's only used by
- IptablesManager
+ IptablesManager.
+
"""
+
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
@@ -96,7 +97,7 @@ class IptablesRule(object):
class IptablesTable(object):
- """An iptables table"""
+ """An iptables table."""
def __init__(self):
self.rules = []
@@ -104,15 +105,16 @@ class IptablesTable(object):
self.unwrapped_chains = set()
def add_chain(self, name, wrap=True):
- """Adds a named chain to the table
+ """Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
- so if nova-compute creates a chain named "OUTPUT", it'll actually
- end up named "nova-compute-OUTPUT".
+ so if nova-compute creates a chain named 'OUTPUT', it'll actually
+ end up named 'nova-compute-OUTPUT'.
+
"""
if wrap:
self.chains.add(name)
@@ -120,12 +122,13 @@ class IptablesTable(object):
self.unwrapped_chains.add(name)
def remove_chain(self, name, wrap=True):
- """Remove named chain
+ """Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
+
"""
if wrap:
chain_set = self.chains
@@ -133,7 +136,7 @@ class IptablesTable(object):
chain_set = self.unwrapped_chains
if name not in chain_set:
- LOG.debug(_("Attempted to remove chain %s which doesn't exist"),
+ LOG.debug(_('Attempted to remove chain %s which does not exist'),
name)
return
@@ -148,17 +151,18 @@ class IptablesTable(object):
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
- """Add a rule to the table
+ """Add a rule to the table.
This is just like what you'd feed to iptables, just without
- the "-A <chain name>" bit at the start.
+ the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
+
"""
if wrap and chain not in self.chains:
- raise ValueError(_("Unknown chain: %r") % chain)
+ raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
@@ -171,23 +175,24 @@ class IptablesTable(object):
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
- """Remove a rule from a chain
+ """Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
+
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
except ValueError:
- LOG.debug(_("Tried to remove rule that wasn't there:"
- " %(chain)r %(rule)r %(wrap)r %(top)r"),
+ LOG.debug(_('Tried to remove rule that was not there:'
+ ' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
class IptablesManager(object):
- """Wrapper for iptables
+ """Wrapper for iptables.
See IptablesTable for some usage docs
@@ -206,7 +211,9 @@ class IptablesManager(object):
For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
wrapped in the same was as the builtin filter chains. Additionally, there's
a snat chain that is applied after the POSTROUTING chain.
+
"""
+
def __init__(self, execute=None):
if not execute:
self.execute = _execute
@@ -268,11 +275,12 @@ class IptablesManager(object):
@utils.synchronized('iptables', external=True)
def apply(self):
- """Apply the current in-memory set of iptables rules
+ """Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
+
"""
s = [('iptables', self.ipv4)]
if FLAGS.use_ipv6:
@@ -349,61 +357,65 @@ class IptablesManager(object):
def metadata_forward():
- """Create forwarding rule for metadata"""
- iptables_manager.ipv4['nat'].add_rule("PREROUTING",
- "-s 0.0.0.0/0 -d 169.254.169.254/32 "
- "-p tcp -m tcp --dport 80 -j DNAT "
- "--to-destination %s:%s" % \
+ """Create forwarding rule for metadata."""
+ iptables_manager.ipv4['nat'].add_rule('PREROUTING',
+ '-s 0.0.0.0/0 -d 169.254.169.254/32 '
+ '-p tcp -m tcp --dport 80 -j DNAT '
+ '--to-destination %s:%s' % \
(FLAGS.ec2_dmz_host, FLAGS.ec2_port))
iptables_manager.apply()
def init_host():
- """Basic networking setup goes here"""
+ """Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
- iptables_manager.ipv4['nat'].add_rule("snat",
- "-s %s -j SNAT --to-source %s" % \
+ iptables_manager.ipv4['nat'].add_rule('snat',
+ '-s %s -j SNAT --to-source %s' % \
(FLAGS.fixed_range,
FLAGS.routing_source_ip))
- iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
- "-s %s -d %s -j ACCEPT" % \
+ iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
+ '-s %s -d %s -j ACCEPT' % \
(FLAGS.fixed_range, FLAGS.dmz_cidr))
- iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
- "-s %(range)s -d %(range)s "
- "-j ACCEPT" % \
+ iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
+ '-s %(range)s -d %(range)s '
+ '-j ACCEPT' % \
{'range': FLAGS.fixed_range})
iptables_manager.apply()
def bind_floating_ip(floating_ip, check_exit_code=True):
- """Bind ip to public interface"""
+ """Bind ip to public interface."""
_execute('sudo', 'ip', 'addr', 'add', floating_ip,
'dev', FLAGS.public_interface,
check_exit_code=check_exit_code)
def unbind_floating_ip(floating_ip):
- """Unbind a public ip from public interface"""
+ """Unbind a public ip from public interface."""
_execute('sudo', 'ip', 'addr', 'del', floating_ip,
'dev', FLAGS.public_interface)
def ensure_metadata_ip():
- """Sets up local metadata ip"""
+ """Sets up local metadata ip."""
_execute('sudo', 'ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo', check_exit_code=False)
def ensure_vlan_forward(public_ip, port, private_ip):
- """Sets up forwarding rules for vlan"""
- iptables_manager.ipv4['filter'].add_rule("FORWARD",
- "-d %s -p udp "
- "--dport 1194 "
- "-j ACCEPT" % private_ip)
- iptables_manager.ipv4['nat'].add_rule("PREROUTING",
+ """Sets up forwarding rules for vlan."""
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '-d %s -p udp '
+ '--dport 1194 '
+ '-j ACCEPT' % private_ip)
+ iptables_manager.ipv4['nat'].add_rule('PREROUTING',
+ '-d %s -p udp '
+ '--dport %s -j DNAT --to %s:1194' %
+ (public_ip, port, private_ip))
+ iptables_manager.ipv4['nat'].add_rule("OUTPUT",
"-d %s -p udp "
"--dport %s -j DNAT --to %s:1194" %
(public_ip, port, private_ip))
@@ -411,24 +423,24 @@ def ensure_vlan_forward(public_ip, port, private_ip):
def ensure_floating_forward(floating_ip, fixed_ip):
- """Ensure floating ip forwarding rule"""
+ """Ensure floating ip forwarding rule."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip):
- """Remove forwarding for floating ip"""
+ """Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
def floating_forward_rules(floating_ip, fixed_ip):
- return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
- ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
- ("floating-snat",
- "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))]
+ return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
+ ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
+ ('floating-snat',
+ '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
@@ -439,9 +451,9 @@ def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
def ensure_vlan(vlan_num, bridge_interface):
"""Create a vlan unless it already exists"""
- interface = "vlan%s" % vlan_num
+ interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
- LOG.debug(_("Starting VLAN inteface %s"), interface)
+        LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
_execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num)
_execute('sudo', 'ip', 'link', 'set', interface, 'up')
@@ -461,12 +473,13 @@ def ensure_bridge(bridge, interface, net_attrs=None):
The code will attempt to move any ips that already exist on the interface
onto the bridge and reset the default gateway if necessary.
+
"""
if not _device_exists(bridge):
- LOG.debug(_("Starting Bridge interface for %s"), interface)
+ LOG.debug(_('Starting Bridge interface for %s'), interface)
_execute('sudo', 'brctl', 'addbr', bridge)
_execute('sudo', 'brctl', 'setfd', bridge, 0)
- # _execute("sudo brctl setageing %s 10" % bridge)
+ # _execute('sudo brctl setageing %s 10' % bridge)
_execute('sudo', 'brctl', 'stp', bridge, 'off')
_execute('sudo', 'ip', 'link', 'set', bridge, 'up')
if net_attrs:
@@ -474,15 +487,15 @@ def ensure_bridge(bridge, interface, net_attrs=None):
# bridge for it to respond to reqests properly
suffix = net_attrs['cidr'].rpartition('/')[2]
out, err = _execute('sudo', 'ip', 'addr', 'add',
- "%s/%s" %
+ '%s/%s' %
(net_attrs['gateway'], suffix),
'brd',
net_attrs['broadcast'],
'dev',
bridge,
check_exit_code=False)
- if err and err != "RTNETLINK answers: File exists\n":
- raise exception.Error("Failed to add ip: %s" % err)
+ if err and err != 'RTNETLINK answers: File exists\n':
+ raise exception.Error('Failed to add ip: %s' % err)
if(FLAGS.use_ipv6):
_execute('sudo', 'ip', '-f', 'inet6', 'addr',
'change', net_attrs['cidr_v6'],
@@ -498,17 +511,17 @@ def ensure_bridge(bridge, interface, net_attrs=None):
# interface, so we move any ips to the bridge
gateway = None
out, err = _execute('sudo', 'route', '-n')
- for line in out.split("\n"):
+ for line in out.split('\n'):
fields = line.split()
- if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
+ if fields and fields[0] == '0.0.0.0' and fields[-1] == interface:
gateway = fields[1]
_execute('sudo', 'route', 'del', 'default', 'gw', gateway,
'dev', interface, check_exit_code=False)
out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface,
'scope', 'global')
- for line in out.split("\n"):
+ for line in out.split('\n'):
fields = line.split()
- if fields and fields[0] == "inet":
+ if fields and fields[0] == 'inet':
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]))
_execute(*_ip_bridge_cmd('add', params, bridge))
@@ -519,18 +532,18 @@ def ensure_bridge(bridge, interface, net_attrs=None):
if (err and err != "device %s is already a member of a bridge; can't "
"enslave it to bridge %s.\n" % (interface, bridge)):
- raise exception.Error("Failed to add interface: %s" % err)
+ raise exception.Error('Failed to add interface: %s' % err)
- iptables_manager.ipv4['filter'].add_rule("FORWARD",
- "--in-interface %s -j ACCEPT" % \
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--in-interface %s -j ACCEPT' % \
bridge)
- iptables_manager.ipv4['filter'].add_rule("FORWARD",
- "--out-interface %s -j ACCEPT" % \
+ iptables_manager.ipv4['filter'].add_rule('FORWARD',
+ '--out-interface %s -j ACCEPT' % \
bridge)
def get_dhcp_leases(context, network_id):
- """Return a network's hosts config in dnsmasq leasefile format"""
+ """Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
network_id):
@@ -539,7 +552,7 @@ def get_dhcp_leases(context, network_id):
def get_dhcp_hosts(context, network_id):
- """Get a string containing a network's hosts config in dhcp-host format"""
+ """Get network's hosts config in dhcp-host format."""
hosts = []
for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
network_id):
@@ -552,10 +565,11 @@ def get_dhcp_hosts(context, network_id):
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def update_dhcp(context, network_id):
- """(Re)starts a dnsmasq server for a given network
+ """(Re)starts a dnsmasq server for a given network.
+
+ If a dnsmasq instance is already running then send a HUP
+ signal causing it to reload, otherwise spawn a new instance.
- if a dnsmasq instance is already running then send a HUP
- signal causing it to reload, otherwise spawn a new instance
"""
network_ref = db.network_get(context, network_id)
@@ -570,16 +584,16 @@ def update_dhcp(context, network_id):
# if dnsmasq is already running, then tell it to reload
if pid:
- out, _err = _execute('cat', "/proc/%d/cmdline" % pid,
+ out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
if conffile in out:
try:
_execute('sudo', 'kill', '-HUP', pid)
return
except Exception as exc: # pylint: disable=W0703
- LOG.debug(_("Hupping dnsmasq threw %s"), exc)
+ LOG.debug(_('Hupping dnsmasq threw %s'), exc)
else:
- LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
+ LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -622,18 +636,18 @@ interface %s
try:
_execute('sudo', 'kill', pid)
except Exception as exc: # pylint: disable=W0703
- LOG.debug(_("killing radvd threw %s"), exc)
+ LOG.debug(_('killing radvd threw %s'), exc)
else:
- LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
+ LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
command = _ra_cmd(network_ref)
_execute(*command)
db.network_update(context, network_id,
- {"gateway_v6":
+ {'gateway_v6':
utils.get_my_linklocal(network_ref['bridge'])})
def _host_lease(fixed_ip_ref):
- """Return a host string for an address in leasefile format"""
+ """Return a host string for an address in leasefile format."""
instance_ref = fixed_ip_ref['instance']
if instance_ref['updated_at']:
timestamp = instance_ref['updated_at']
@@ -642,43 +656,43 @@ def _host_lease(fixed_ip_ref):
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
- return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time,
+ return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
fixed_ip_ref['mac_address']['address'],
fixed_ip_ref['address'],
instance_ref['hostname'] or '*')
def _host_dhcp(fixed_ip_ref):
- """Return a host string for an address in dhcp-host format"""
+ """Return a host string for an address in dhcp-host format."""
instance_ref = fixed_ip_ref['instance']
- return "%s,%s.%s,%s" % (fixed_ip_ref['mac_address']['address'],
+ return '%s,%s.%s,%s' % (fixed_ip_ref['mac_address']['address'],
instance_ref['hostname'],
FLAGS.dhcp_domain,
fixed_ip_ref['address'])
def _execute(*cmd, **kwargs):
- """Wrapper around utils._execute for fake_network"""
+ """Wrapper around utils._execute for fake_network."""
if FLAGS.fake_network:
- LOG.debug("FAKE NET: %s", " ".join(map(str, cmd)))
- return "fake", 0
+ LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
+ return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def _device_exists(device):
- """Check if ethernet device exists"""
+ """Check if ethernet device exists."""
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False)
return not err
def _dnsmasq_cmd(net):
- """Builds dnsmasq command"""
+ """Builds dnsmasq command."""
cmd = ['sudo', '-E', 'dnsmasq',
'--strict-order',
'--bind-interfaces',
- '--conf-file=',
+ '--conf-file=%s' % FLAGS.dnsmasq_config_file,
'--domain=%s' % FLAGS.dhcp_domain,
'--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
'--listen-address=%s' % net['gateway'],
@@ -693,7 +707,7 @@ def _dnsmasq_cmd(net):
def _ra_cmd(net):
- """Builds radvd command"""
+ """Builds radvd command."""
cmd = ['sudo', '-E', 'radvd',
# '-u', 'nobody',
'-C', '%s' % _ra_file(net['bridge'], 'conf'),
@@ -702,44 +716,43 @@ def _ra_cmd(net):
def _stop_dnsmasq(network):
- """Stops the dnsmasq instance for a given network"""
+ """Stops the dnsmasq instance for a given network."""
pid = _dnsmasq_pid_for(network)
if pid:
try:
_execute('sudo', 'kill', '-TERM', pid)
except Exception as exc: # pylint: disable=W0703
- LOG.debug(_("Killing dnsmasq threw %s"), exc)
+ LOG.debug(_('Killing dnsmasq threw %s'), exc)
def _dhcp_file(bridge, kind):
- """Return path to a pid, leases or conf file for a bridge"""
-
+ """Return path to a pid, leases or conf file for a bridge."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
- return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path,
+ return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
bridge,
kind))
def _ra_file(bridge, kind):
- """Return path to a pid or conf file for a bridge"""
+ """Return path to a pid or conf file for a bridge."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
- return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path,
+ return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
bridge,
kind))
def _dnsmasq_pid_for(bridge):
- """Returns the pid for prior dnsmasq instance for a bridge
+ """Returns the pid for prior dnsmasq instance for a bridge.
- Returns None if no pid file exists
+ Returns None if no pid file exists.
- If machine has rebooted pid might be incorrect (caller should check)
- """
+ If machine has rebooted pid might be incorrect (caller should check).
+ """
pid_file = _dhcp_file(bridge, 'pid')
if os.path.exists(pid_file):
@@ -748,13 +761,13 @@ def _dnsmasq_pid_for(bridge):
def _ra_pid_for(bridge):
- """Returns the pid for prior radvd instance for a bridge
+ """Returns the pid for prior radvd instance for a bridge.
- Returns None if no pid file exists
+ Returns None if no pid file exists.
- If machine has rebooted pid might be incorrect (caller should check)
- """
+ If machine has rebooted pid might be incorrect (caller should check).
+ """
pid_file = _ra_file(bridge, 'pid')
if os.path.exists(pid_file):
@@ -763,8 +776,7 @@ def _ra_pid_for(bridge):
def _ip_bridge_cmd(action, params, device):
- """Build commands to add/del ips to bridges/devices"""
-
+ """Build commands to add/del ips to bridges/devices."""
cmd = ['sudo', 'ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
diff --git a/nova/network/manager.py b/nova/network/manager.py
index a309cdbcb..ba5b80522 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -16,8 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Network Hosts are responsible for allocating ips and setting up network.
+"""Network Hosts are responsible for allocating ips and setting up network.
There are multiple backend drivers that handle specific types of networking
topologies. All of the network commands are issued to a subclass of
@@ -58,6 +57,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
+from nova import quota
from nova import utils
from nova import rpc
from nova.network import api as network_api
@@ -65,6 +65,8 @@ import random
LOG = logging.getLogger("nova.network.manager")
+
+
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
@@ -117,7 +119,7 @@ class RPCAllocateFixedIP(object):
used since they share code to RPC.call allocate_fixed_ip on the
correct network host to configure dnsmasq
"""
- def _allocate_fixed_ips(self, context, instance, networks, **kwargs):
+ def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs):
"""calls allocate_fixed_ip once for each network"""
green_pool = greenpool.GreenPool()
for network in networks:
@@ -126,32 +128,33 @@ class RPCAllocateFixedIP(object):
topic = self.db.queue_get_for(context, FLAGS.network_topic,
network['host'])
args = kwargs
- args['instance'] = pickle.dumps(instance)
- args['network'] = pickle.dumps(network)
+ args['instance_id'] = instance_id
+            args['network_id'] = network['id']
green_pool.spawn_n(rpc.call, context, topic,
{'method': '_rpc_allocate_fixed_ip',
'args': args})
else:
# i am the correct host, run here
- self.allocate_fixed_ip(context, instance, network, **kwargs)
+ self.allocate_fixed_ip(context, instance_id, network, **kwargs)
# wait for all of the allocates (if any) to finish
green_pool.waitall()
- def _rpc_allocate_fixed_ip(self, context, instance, network, **kwargs):
+ def _rpc_allocate_fixed_ip(self, context, instance_id, network_id,
+ **kwargs):
"""sits in between _allocate_fixed_ips and allocate_fixed_ip to
- perform pickling for rpc
+ perform network lookup on the far side of rpc
"""
- instance = pickle.loads(instance)
- network = pickle.loads(network)
- self.allocate_fixed_ip(context, instance, network, **kwargs)
+ network = self.db.network_get(context, network_id)
+ self.allocate_fixed_ip(context, instance_id, network, **kwargs)
class FloatingIP(object):
"""mixin class for adding floating IP functionality to a manager"""
def init_host_floating_ips(self):
"""configures floating ips owned by host"""
+
admin_context = context.get_admin_context()
floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
self.host)
@@ -164,8 +167,76 @@ class FloatingIP(object):
self.driver.ensure_floating_forward(floating_ip['address'],
fixed_address)
+ def allocate_for_instance(self, context, **kwargs):
+ """handles allocating the floating IP resources for an instance
+ calls super class allocate_for_instance() as well
+
+ rpc.called by network_api
+ """
+ instance_id = kwargs.get('instance_id')
+ project_id = kwargs.get('project_id')
+ LOG.debug(_("floating IP allocation for instance |%s|"), instance_id,
+ context=context)
+ # call the next inherited class's allocate_for_instance()
+ # which is currently the NetworkManager version
+ # do this first so fixed ip is already allocated
+ super(FloatingIP, self).allocate_for_instance(context, **kwargs)
+ if FLAGS.auto_assign_floating_ip:
+ # allocate a floating ip (public_ip is just the address string)
+ public_ip = self.allocate_floating_ip(context, project_id)
+ # set auto_assigned column to true for the floating ip
+ self.db.floating_ip_set_auto_assigned(context, public_ip)
+ # get the floating ip object from public_ip string
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ public_ip)
+
+ # get the first fixed_ip belonging to the instance
+ fixed_ips = self.db.fixed_ip_get_all_by_instance(context,
+ instance_id)
+ fixed_ip = fixed_ips[0] if fixed_ips else None
+
+            # call to correct network host to associate the floating ip;
+            # skip when the instance has no fixed ip to associate with
+            if fixed_ip:
+                network_api.associate_floating_ip(context, floating_ip,
+                    fixed_ip, affect_auto_assigned=True)
+
+ def deallocate_for_instance(self, context, **kwargs):
+ """handles deallocating floating IP resources for an instance
+ calls super class deallocate_for_instance() as well
+
+ rpc.called by network_api
+ """
+ instance_id = kwargs.get('instance_id')
+ LOG.debug(_("floating IP deallocation for instance |%s|"), instance_id,
+ context=context)
+
+ fixed_ips = self.db.fixed_ip_get_all_by_instance(context,
+ instance_id)
+ # add to kwargs so we can pass to super to save a db lookup there
+ kwargs['fixed_ips'] = fixed_ips
+ for fixed_ip in fixed_ips:
+ # disassociate floating ips related to fixed_ip
+ for floating_ip in fixed_ip.floating_ips:
+ address = floating_ip['address']
+                network_api.disassociate_floating_ip(context, address, True)
+ # deallocate if auto_assigned
+ if floating_ip['auto_assigned']:
+ network_api.release_floating_ip(context, address, True)
+
+ # call the next inherited class's deallocate_for_instance()
+ # which is currently the NetworkManager version
+ # call this after so floating IPs are handled first
+ super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
+
def allocate_floating_ip(self, context, project_id):
"""Gets an floating ip from the pool."""
+ if quota.allowed_floating_ips(context, 1) < 1:
+            LOG.warn(_('Quota exceeded for %s, tried to allocate '
+                       'address'),
+                     project_id)
+ raise quota.QuotaError(_('Address quota exceeded. You cannot '
+ 'allocate any more addresses'))
# TODO(vish): add floating ips through manage command
return self.db.floating_ip_allocate_address(context,
self.host,
@@ -203,6 +274,7 @@ class NetworkManager(manager.SchedulerDependentManager):
as the hosts pick them up one at time during their periodic task.
The one at a time part is to flatten the layout to help scale
"""
+
timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
@@ -233,14 +305,14 @@ class NetworkManager(manager.SchedulerDependentManager):
self.host,
time)
if num:
- LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
+            LOG.debug(_('Disassociated %s stale fixed ip(s)'), num)
# setup any new networks which have been created
self.set_network_hosts(context)
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
- LOG.debug(_("setting network host"), context=context)
+ LOG.debug(_('setting network host'), context=context)
host = self.db.network_set_host(context,
network_id,
self.host)
@@ -257,7 +329,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# return so worker will only grab 1 (to help scale flatter)
return self.set_network_host(context, network['id'])
- def _get_networks_for_instance(self, context, instance=None):
+ def _get_networks_for_instance(self, context, instance_id, project_id):
"""determine which networks an instance should connect to"""
# TODO(tr3buchet) maybe this needs to be updated in the future if
# there is a better way to determine which networks
@@ -268,73 +340,58 @@ class NetworkManager(manager.SchedulerDependentManager):
return [network for network in networks if
not network['vlan'] and network['host']]
- def allocate_for_instance(self, context, instance, **kwargs):
+ def allocate_for_instance(self, context, **kwargs):
"""handles allocating the various network resources for an instance
rpc.called by network_api
- instance expected to be pickled
- return value is also pickled
"""
- instance = pickle.loads(instance)
- LOG.debug(_("network allocations for instance %s"), instance['id'],
- context=context)
+ instance_id = kwargs.get('instance_id')
+ project_id = kwargs.get('project_id')
+ type_id = kwargs.get('instance_type_id')
admin_context = context.elevated()
- networks = self._get_networks_for_instance(admin_context, instance)
- self._allocate_mac_addresses(context, instance, networks)
- self._allocate_fixed_ips(admin_context, instance, networks, **kwargs)
- return pickle.dumps(self._create_nw_info(admin_context, instance))
-
- def deallocate_for_instance(self, context, instance, **kwargs):
+ LOG.debug(_("network allocations for instance %s"), instance_id,
+ context=context)
+ networks = self._get_networks_for_instance(admin_context, instance_id,
+ project_id)
+ self._allocate_mac_addresses(context, instance_id, networks)
+ self._allocate_fixed_ips(admin_context, instance_id, networks,
+ **kwargs)
+ return self.get_instance_nw_info(context, instance_id, type_id)
+
+ def deallocate_for_instance(self, context, **kwargs):
"""handles deallocating various network resources for an instance
rpc.called by network_api
- instance is expected to be pickled
+ kwargs can contain fixed_ips to circumvent another db lookup
"""
- instance = pickle.loads(instance)
- LOG.debug(_("network deallocation for instance |%s|"), instance['id'],
+ instance_id = kwargs.get('instance_id')
+ fixed_ips = kwargs.get('fixed_ips') or \
+ self.db.fixed_ip_get_all_by_instance(context, instance_id)
+ LOG.debug(_("network deallocation for instance |%s|"), instance_id,
context=context)
-
# deallocate mac addresses
self.db.mac_address_delete_by_instance(context, instance_id)
# deallocate fixed ips
- for fixed_ip in self.db.fixed_ip_get_all_by_instance(context,
- instance['id']):
- # disassociate floating ips related to fixed_ip
- for floating_ip in fixed_ip.floating_ips:
- network_api.disassociate_floating_ip(context,
- floating_ip['address'])
- # then deallocate fixed_ip
+ for fixed_ip in fixed_ips:
self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs)
- def get_instance_nw_info(self, context, instance):
- """unpickles instance and calls _create_nw_info
- then pickles its return for passing through rpc
-
- rpc.called by network_api
- instance is expected to be pickled
- return value is also pickled
- """
- instance = pickle.loads(instance)
- LOG.debug(_("getting network info for instance |%s|"), instance['id'])
- admin_context = context.elevated()
- return pickle.dumps(self._create_nw_info(admin_context, instance))
-
- def _create_nw_info(self, context, instance):
+ def get_instance_nw_info(self, context, instance_id, instance_type_id):
"""creates network info list for instance
- called by allocate_for_instance and get_instance_nw_info
+        called by allocate_for_instance and network_api
context needs to be elevated
returns network info list [(network,info),(network,info)...]
- where network is a db object and info is a dict
+ where network = dict containing pertinent data from a network db object
+ and info = dict containing pertinent networking data
"""
# TODO(tr3buchet) should handle floating IPs as well?
fixed_ips = self.db.fixed_ip_get_all_by_instance(context,
- instance['id'])
+ instance_id)
mac_addresses = self.db.mac_address_get_all_by_instance(context,
- instance['id'])
+ instance_id)
flavor = self.db.instance_type_get_by_id(context,
- instance['instance_type_id'])
+ instance_type_id)
network_info = []
# a mac_address contains address, instance_id, network_id
# it is also joined to the instance and network given by those IDs
@@ -358,7 +415,8 @@ class NetworkManager(manager.SchedulerDependentManager):
mac_address['address']),
"netmask": network['netmask_v6'],
"enabled": "1"}
-
+ network_dict = {
+ 'bridge': network['bridge']}
info = {
'label': network['label'],
'gateway': network['gateway'],
@@ -372,14 +430,14 @@ class NetworkManager(manager.SchedulerDependentManager):
# TODO(tr3buchet): handle ip6 routes here as well
if network['gateway_v6']:
info['gateway6'] = network['gateway_v6']
- network_info.append((network, info))
+ network_info.append((network_dict, info))
return network_info
- def _allocate_mac_addresses(self, context, instance, networks):
+ def _allocate_mac_addresses(self, context, instance_id, networks):
"""generates and stores mac addresses"""
for network in networks:
mac_address = {'address': self.generate_mac_address(),
- 'instance_id': instance['id'],
+ 'instance_id': instance_id,
'network_id': network['id']}
# try 5 times to create a unique mac_address
for i in range(5):
@@ -400,7 +458,7 @@ class NetworkManager(manager.SchedulerDependentManager):
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
- def allocate_fixed_ip(self, context, instance, network, **kwargs):
+ def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
@@ -408,9 +466,9 @@ class NetworkManager(manager.SchedulerDependentManager):
# network_get_by_compute_host
address = self.db.fixed_ip_associate_pool(context.elevated(),
network['id'],
- instance['id'])
+ instance_id)
mac = self.db.mac_address_get_by_instance_and_network(context,
- instance['id'],
+ instance_id,
network['id'])
values = {'allocated': True,
'mac_address_id': mac['id']}
@@ -423,39 +481,39 @@ class NetworkManager(manager.SchedulerDependentManager):
def lease_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is leased."""
- LOG.debug(_("Leasing IP %s"), address, context=context)
+ LOG.debug(_('Leasing IP %s'), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
- raise exception.Error(_("IP %s leased that isn't associated") %
+ raise exception.Error(_('IP %s leased that is not associated') %
address)
mac_address = fixed_ip_ref['mac_address']['address']
if mac_address != mac:
- raise exception.Error(_("IP %(address)s leased to bad"
- " mac %(mac_address)s vs %(mac)s") % locals())
+ raise exception.Error(_('IP %(address)s leased to bad'
+ ' mac %(mac_address)s vs %(mac)s') % locals())
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
- LOG.warn(_("IP %s leased that was already deallocated"), address,
+ LOG.warn(_('IP %s leased that was already deallocated'), address,
context=context)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
- LOG.debug(_("Releasing IP %s"), address, context=context)
+ LOG.debug(_('Releasing IP %s'), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
- raise exception.Error(_("IP %s released that isn't associated") %
+ raise exception.Error(_('IP %s released that is not associated') %
address)
mac_address = fixed_ip_ref['mac_address']['address']
if mac_address != mac:
- raise exception.Error(_("IP %(address)s released from"
- " bad mac %(mac_address)s vs %(mac)s") % locals())
+ raise exception.Error(_('IP %(address)s released from'
+ ' bad mac %(mac_address)s vs %(mac)s') % locals())
if not fixed_ip_ref['leased']:
- LOG.warn(_("IP %s released that was not leased"), address,
+ LOG.warn(_('IP %s released that was not leased'), address,
context=context)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
@@ -480,7 +538,7 @@ class NetworkManager(manager.SchedulerDependentManager):
start = index * network_size
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
- cidr = "%s/%s" % (fixed_net[start], significant_bits)
+ cidr = '%s/%s' % (fixed_net[start], significant_bits)
project_net = IPy.IP(cidr)
net = {}
net['bridge'] = bridge
@@ -492,12 +550,12 @@ class NetworkManager(manager.SchedulerDependentManager):
net['broadcast'] = str(project_net.broadcast())
net['dhcp_start'] = str(project_net[2])
if num_networks > 1:
- net['label'] = "%s_%d" % (label, index)
+ net['label'] = '%s_%d' % (label, index)
else:
net['label'] = label
if FLAGS.use_ipv6:
- cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
+ cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
project_net_v6 = IPy.IP(cidr_v6)
@@ -555,7 +613,7 @@ class NetworkManager(manager.SchedulerDependentManager):
'address': address,
'reserved': reserved})
- def _allocate_fixed_ips(self, context, instance, networks, **kwargs):
+ def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs):
"""calls allocate_fixed_ip once for each network"""
raise NotImplementedError()
@@ -596,13 +654,15 @@ class FlatManager(NetworkManager):
Metadata forwarding must be handled by the gateway, and since nova does
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
+
"""
+
timeout_fixed_ips = False
- def _allocate_fixed_ips(self, context, instance, networks, **kwargs):
+ def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs):
"""calls allocate_fixed_ip once for each network"""
for network in networks:
- self.allocate_fixed_ip(context, instance, network, **kwargs)
+ self.allocate_fixed_ip(context, instance_id, network, **kwargs)
def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
@@ -612,8 +672,7 @@ class FlatManager(NetworkManager):
def setup_compute_network(self, context, instance_id):
"""Network is created manually.
- this code is run on and by the compute host, not on network
- hosts
+ this code is run on and by the compute host, not on network hosts
"""
pass
@@ -625,12 +684,13 @@ class FlatManager(NetworkManager):
self.db.network_update(context, network_id, net)
-class FlatDHCPManager(NetworkManager, RPCAllocateFixedIP, FloatingIP):
+class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
It never injects network settings into the guest. Otherwise it behaves
like FlatDHCPManager.
+
"""
def init_host(self):
@@ -647,18 +707,17 @@ class FlatDHCPManager(NetworkManager, RPCAllocateFixedIP, FloatingIP):
def setup_compute_network(self, context, instance_id):
"""Sets up matching networks for compute hosts.
- this code is run on and by the compute host, not on network
- hosts
+ this code is run on and by the compute host, not on network hosts
"""
networks = db.network_get_all_by_instance(context, instance_id)
for network in networks:
self.driver.ensure_bridge(network['bridge'],
network['bridge_interface'])
- def allocate_fixed_ip(self, context, instance, network, **kwargs):
+ def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Allocate flat_network fixed_ip, then setup dhcp for this network."""
address = super(FlatDHCPManager, self).allocate_fixed_ip(context,
- instance,
+ instance_id,
network,
**kwargs)
if not FLAGS.fake_network:
@@ -691,6 +750,7 @@ class VlanManager(NetworkManager, RPCAllocateFixedIP, FloatingIP):
A dhcp server is run for each subnet, so each project will have its own.
For this mode to be useful, each project will need a vpn to access the
instances in its subnet.
+
"""
def init_host(self):
@@ -706,19 +766,19 @@ class VlanManager(NetworkManager, RPCAllocateFixedIP, FloatingIP):
self.driver.metadata_forward()
- def allocate_fixed_ip(self, context, instance, network, **kwargs):
+ def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
if kwargs.get('vpn', None):
address = network['vpn_private_address']
self.db.fixed_ip_associate(context,
address,
- instance['id'])
+ instance_id)
else:
address = self.db.fixed_ip_associate_pool(context,
network['id'],
- instance['id'])
+ instance_id)
mac = self.db.mac_address_get_by_instance_and_network(context,
- instance['id'],
+ instance_id,
network['id'])
values = {'allocated': True,
'mac_address_id': mac['id']}
@@ -728,8 +788,7 @@ class VlanManager(NetworkManager, RPCAllocateFixedIP, FloatingIP):
def setup_compute_network(self, context, instance_id):
"""Sets up matching network for compute hosts.
- this code is run on and by the compute host, not on network
- hosts
+ this code is run on and by the compute host, not on network hosts
"""
networks = self.db.network_get_all_by_instance(context, instance_id)
for network in networks:
@@ -737,11 +796,10 @@ class VlanManager(NetworkManager, RPCAllocateFixedIP, FloatingIP):
network['bridge'],
network['bridge_interface'])
- def _get_networks_for_instance(self, context, instance):
+ def _get_networks_for_instance(self, context, instance_id, project_id):
"""determine which networks an instance should connect to"""
# get networks associated with project
- networks = self.db.project_get_networks(context,
- instance['project_id'])
+ networks = self.db.project_get_networks(context, project_id)
# return only networks which have host set
return [network for network in networks if network['host']]
diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py
index 5c94ab155..00d41eee2 100644
--- a/nova/network/vmwareapi_net.py
+++ b/nova/network/vmwareapi_net.py
@@ -15,9 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Implements vlans for vmwareapi.
-"""
+"""Implements vlans for vmwareapi."""
from nova import db
from nova import exception
@@ -27,8 +25,10 @@ from nova import utils
from nova.virt.vmwareapi_conn import VMWareAPISession
from nova.virt.vmwareapi import network_utils
+
LOG = logging.getLogger("nova.network.vmwareapi_net")
+
FLAGS = flags.FLAGS
flags.DEFINE_string('vlan_interface', 'vmnic0',
'Physical network adapter name in VMware ESX host for '
@@ -42,26 +42,23 @@ def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
host_username = FLAGS.vmwareapi_host_username
host_password = FLAGS.vmwareapi_host_password
if not host_ip or host_username is None or host_password is None:
- raise Exception(_("Must specify vmwareapi_host_ip,"
- "vmwareapi_host_username "
- "and vmwareapi_host_password to use"
- "connection_type=vmwareapi"))
+ raise Exception(_('Must specify vmwareapi_host_ip, '
+ 'vmwareapi_host_username '
+ 'and vmwareapi_host_password to use '
+ 'connection_type=vmwareapi'))
session = VMWareAPISession(host_ip, host_username, host_password,
FLAGS.vmwareapi_api_retry_count)
vlan_interface = bridge_interface
# Check if the vlan_interface physical network adapter exists on the host
if not network_utils.check_if_vlan_interface_exists(session,
vlan_interface):
- raise exception.NotFound(_("There is no physical network adapter with "
- "the name %s on the ESX host") % vlan_interface)
+ raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
# Get the vSwitch associated with the Physical Adapter
vswitch_associated = network_utils.get_vswitch_for_vlan_interface(
session, vlan_interface)
if vswitch_associated is None:
- raise exception.NotFound(_("There is no virtual switch associated "
- "with the physical network adapter with name %s") %
- vlan_interface)
+ raise exception.SwicthNotFoundForNetworkAdapter(adapter=vlan_interface)
# Check whether bridge already exists and retrieve the the ref of the
# network whose name_label is "bridge"
network_ref = network_utils.get_network_with_the_name(session, bridge)
@@ -75,17 +72,13 @@ def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
pg_vlanid, pg_vswitch = \
network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge)
- # Check if the vsiwtch associated is proper
+ # Check if the vswitch associated is proper
if pg_vswitch != vswitch_associated:
- raise exception.Invalid(_("vSwitch which contains the port group "
- "%(bridge)s is not associated with the desired "
- "physical adapter. Expected vSwitch is "
- "%(vswitch_associated)s, but the one associated"
- " is %(pg_vswitch)s") % locals())
+ raise exception.InvalidVLANPortGroup(bridge=bridge,
+ expected=vswitch_associated,
+ actual=pg_vswitch)
# Check if the vlan id is proper for the port group
if pg_vlanid != vlan_num:
- raise exception.Invalid(_("VLAN tag is not appropriate for the "
- "port group %(bridge)s. Expected VLAN tag is "
- "%(vlan_num)s, but the one associated with the "
- "port group is %(pg_vlanid)s") % locals())
+ raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num,
+ pgroup=pg_vlanid)
diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py
index 6d16be3e9..34a598ead 100644
--- a/nova/network/xenapi_net.py
+++ b/nova/network/xenapi_net.py
@@ -15,9 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Implements vlans, bridges, and iptables rules using linux utilities.
-"""
+"""Implements vlans, bridges, and iptables rules using linux utilities."""
import os
@@ -26,22 +24,24 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
-from nova.virt.xenapi_conn import XenAPISession
+from nova.virt import xenapi_conn
from nova.virt.xenapi import network_utils
+
LOG = logging.getLogger("nova.xenapi_net")
+
FLAGS = flags.FLAGS
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
# Open xenapi session
- LOG.debug("ENTERING ensure_vlan_bridge in xenapi net")
+ LOG.debug('ENTERING ensure_vlan_bridge in xenapi net')
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
- session = XenAPISession(url, username, password)
+ session = xenapi_conn.XenAPISession(url, username, password)
# Check whether bridge already exists
# Retrieve network whose name_label is "bridge"
network_ref = network_utils.NetworkHelper.find_network_with_name_label(
@@ -50,14 +50,14 @@ def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
if network_ref is None:
# If bridge does not exists
# 1 - create network
- description = "network for nova bridge %s" % bridge
+ description = 'network for nova bridge %s' % bridge
network_rec = {'name_label': bridge,
'name_description': description,
'other_config': {}}
network_ref = session.call_xenapi('network.create', network_rec)
# 2 - find PIF for VLAN
- expr = 'field "device" = "%s" and \
- field "VLAN" = "-1"' % bridge_interface
+ expr = "field 'device' = '%s' and \
+ field 'VLAN' = '-1'" % bridge_interface
pifs = session.call_xenapi('PIF.get_all_records_where', expr)
pif_ref = None
# Multiple PIF are ok: we are dealing with a pool
diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py
new file mode 100644
index 000000000..482d54e4f
--- /dev/null
+++ b/nova/notifier/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
new file mode 100644
index 000000000..a3e7a039e
--- /dev/null
+++ b/nova/notifier/api.py
@@ -0,0 +1,83 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from nova import flags
+from nova import utils
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('default_notification_level', 'INFO',
+ 'Default notification level for outgoing notifications')
+
+WARN = 'WARN'
+INFO = 'INFO'
+ERROR = 'ERROR'
+CRITICAL = 'CRITICAL'
+DEBUG = 'DEBUG'
+
+log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
+
+
+class BadPriorityException(Exception):
+ pass
+
+
+def notify(publisher_id, event_type, priority, payload):
+ """
+ Sends a notification using the specified driver
+
+ Notify parameters:
+
+ publisher_id - the source worker_type.host of the message
+ event_type - the literal type of event (ex. Instance Creation)
+ priority - patterned after the enumeration of Python logging levels in
+ the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
+ payload - A python dictionary of attributes
+
+ Outgoing message format includes the above parameters, and appends the
+ following:
+
+ message_id - a UUID representing the id for this notification
+ timestamp - the GMT timestamp the notification was sent at
+
+ The composite message will be constructed as a dictionary of the above
+ attributes, which will then be sent via the transport mechanism defined
+ by the driver.
+
+ Message example:
+
+ {'message_id': str(uuid.uuid4()),
+ 'publisher_id': 'compute.host1',
+ 'timestamp': datetime.datetime.utcnow(),
+ 'priority': 'WARN',
+ 'event_type': 'compute.create_instance',
+ 'payload': {'instance_id': 12, ... }}
+
+ """
+ if priority not in log_levels:
+ raise BadPriorityException(
+ _('%s not in valid priorities' % priority))
+ driver = utils.import_object(FLAGS.notification_driver)
+ msg = dict(message_id=str(uuid.uuid4()),
+ publisher_id=publisher_id,
+ event_type=event_type,
+ priority=priority,
+ payload=payload,
+ timestamp=str(datetime.datetime.utcnow()))
+ driver.notify(msg)
diff --git a/nova/notifier/log_notifier.py b/nova/notifier/log_notifier.py
new file mode 100644
index 000000000..25dfc693b
--- /dev/null
+++ b/nova/notifier/log_notifier.py
@@ -0,0 +1,34 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from nova import flags
+from nova import log as logging
+
+
+FLAGS = flags.FLAGS
+
+
+def notify(message):
+ """Notifies the recipient of the desired event given the model.
+ Log notifications using nova's default logging system"""
+
+ priority = message.get('priority',
+ FLAGS.default_notification_level)
+ priority = priority.lower()
+ logger = logging.getLogger(
+ 'nova.notification.%s' % message['event_type'])
+ getattr(logger, priority)(json.dumps(message))
diff --git a/nova/notifier/no_op_notifier.py b/nova/notifier/no_op_notifier.py
new file mode 100644
index 000000000..029710505
--- /dev/null
+++ b/nova/notifier/no_op_notifier.py
@@ -0,0 +1,19 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def notify(message):
+ """Notifies the recipient of the desired event given the model"""
+ pass
diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py
new file mode 100644
index 000000000..d46670b58
--- /dev/null
+++ b/nova/notifier/rabbit_notifier.py
@@ -0,0 +1,36 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import nova.context
+
+from nova import flags
+from nova import rpc
+
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string('notification_topic', 'notifications',
+ 'RabbitMQ topic used for Nova notifications')
+
+
+def notify(message):
+ """Sends a notification to the RabbitMQ"""
+ context = nova.context.get_admin_context()
+ priority = message.get('priority',
+ FLAGS.default_notification_level)
+ priority = priority.lower()
+ topic = '%s.%s' % (FLAGS.notification_topic, priority)
+ rpc.cast(context, topic, message)
diff --git a/nova/quota.py b/nova/quota.py
index d8b5d9a93..a93cd0766 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -52,26 +52,31 @@ def get_quota(context, project_id):
'floating_ips': FLAGS.quota_floating_ips,
'metadata_items': FLAGS.quota_metadata_items}
- try:
- quota = db.quota_get(context, project_id)
- for key in rval.keys():
- if quota[key] is not None:
- rval[key] = quota[key]
- except exception.NotFound:
- pass
+ quota = db.quota_get_all_by_project(context, project_id)
+ for key in rval.keys():
+ if key in quota:
+ rval[key] = quota[key]
return rval
+def _get_request_allotment(requested, used, quota):
+ if quota is None:
+ return requested
+ return quota - used
+
+
def allowed_instances(context, num_instances, instance_type):
"""Check quota and return min(num_instances, allowed_instances)."""
project_id = context.project_id
context = context.elevated()
+ num_cores = num_instances * instance_type['vcpus']
used_instances, used_cores = db.instance_data_get_for_project(context,
project_id)
quota = get_quota(context, project_id)
- allowed_instances = quota['instances'] - used_instances
- allowed_cores = quota['cores'] - used_cores
- num_cores = num_instances * instance_type['vcpus']
+ allowed_instances = _get_request_allotment(num_instances, used_instances,
+ quota['instances'])
+ allowed_cores = _get_request_allotment(num_cores, used_cores,
+ quota['cores'])
allowed_instances = min(allowed_instances,
int(allowed_cores // instance_type['vcpus']))
return min(num_instances, allowed_instances)
@@ -81,13 +86,15 @@ def allowed_volumes(context, num_volumes, size):
"""Check quota and return min(num_volumes, allowed_volumes)."""
project_id = context.project_id
context = context.elevated()
+ size = int(size)
+ num_gigabytes = num_volumes * size
used_volumes, used_gigabytes = db.volume_data_get_for_project(context,
project_id)
quota = get_quota(context, project_id)
- allowed_volumes = quota['volumes'] - used_volumes
- allowed_gigabytes = quota['gigabytes'] - used_gigabytes
- size = int(size)
- num_gigabytes = num_volumes * size
+ allowed_volumes = _get_request_allotment(num_volumes, used_volumes,
+ quota['volumes'])
+ allowed_gigabytes = _get_request_allotment(num_gigabytes, used_gigabytes,
+ quota['gigabytes'])
allowed_volumes = min(allowed_volumes,
int(allowed_gigabytes // size))
return min(num_volumes, allowed_volumes)
@@ -99,7 +106,9 @@ def allowed_floating_ips(context, num_floating_ips):
context = context.elevated()
used_floating_ips = db.floating_ip_count_by_project(context, project_id)
quota = get_quota(context, project_id)
- allowed_floating_ips = quota['floating_ips'] - used_floating_ips
+ allowed_floating_ips = _get_request_allotment(num_floating_ips,
+ used_floating_ips,
+ quota['floating_ips'])
return min(num_floating_ips, allowed_floating_ips)
@@ -108,8 +117,9 @@ def allowed_metadata_items(context, num_metadata_items):
project_id = context.project_id
context = context.elevated()
quota = get_quota(context, project_id)
- num_allowed_metadata_items = quota['metadata_items']
- return min(num_metadata_items, num_allowed_metadata_items)
+ allowed_metadata_items = _get_request_allotment(num_metadata_items, 0,
+ quota['metadata_items'])
+ return min(num_metadata_items, allowed_metadata_items)
def allowed_injected_files(context):
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 6bb3bf3cd..816ae5513 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -76,11 +76,9 @@ def zone_update(context, zone_id, data):
return db.zone_update(context, zone_id, data)
-def get_zone_capabilities(context, service=None):
- """Returns a dict of key, value capabilities for this zone,
- or for a particular class of services running in this zone."""
- return _call_scheduler('get_zone_capabilities', context=context,
- params=dict(service=service))
+def get_zone_capabilities(context):
+ """Returns a dict of key, value capabilities for this zone."""
+ return _call_scheduler('get_zone_capabilities', context=context)
def update_service_capabilities(context, service_name, host, capabilities):
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index ce05d9f6a..2094e3565 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -129,15 +129,14 @@ class Scheduler(object):
if (power_state.RUNNING != instance_ref['state'] or \
'running' != instance_ref['state_description']):
ec2_id = instance_ref['hostname']
- raise exception.Invalid(_('Instance(%s) is not running') % ec2_id)
+ raise exception.InstanceNotRunning(instance_id=ec2_id)
# Checing volume node is running when any volumes are mounted
# to the instance.
if len(instance_ref['volumes']) != 0:
services = db.service_get_all_by_topic(context, 'volume')
if len(services) < 1 or not self.service_is_up(services[0]):
- raise exception.Invalid(_("volume node is not alive"
- "(time synchronize problem?)"))
+ raise exception.VolumeServiceUnavailable()
# Checking src host exists and compute node
src = instance_ref['host']
@@ -145,8 +144,7 @@ class Scheduler(object):
# Checking src host is alive.
if not self.service_is_up(services[0]):
- raise exception.Invalid(_("%s is not alive(time "
- "synchronize problem?)") % src)
+ raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest):
"""Live migration check routine (for destination host).
@@ -163,17 +161,15 @@ class Scheduler(object):
# Checking dest host is alive.
if not self.service_is_up(dservice_ref):
- raise exception.Invalid(_("%s is not alive(time "
- "synchronize problem?)") % dest)
+ raise exception.ComputeServiceUnavailable(host=dest)
# Checking whether The host where instance is running
# and dest is not same.
src = instance_ref['host']
if dest == src:
ec2_id = instance_ref['hostname']
- raise exception.Invalid(_("%(dest)s is where %(ec2_id)s is "
- "running now. choose other host.")
- % locals())
+ raise exception.UnableToMigrateToSelf(instance_id=ec2_id,
+ host=dest)
# Checking dst host still has enough capacities.
self.assert_compute_node_has_enough_resources(context,
@@ -204,26 +200,20 @@ class Scheduler(object):
oservice_refs = db.service_get_all_compute_by_host(context,
instance_ref['launched_on'])
except exception.NotFound:
- raise exception.Invalid(_("host %s where instance was launched "
- "does not exist.")
- % instance_ref['launched_on'])
+ raise exception.SourceHostUnavailable()
oservice_ref = oservice_refs[0]['compute_node'][0]
# Checking hypervisor is same.
orig_hypervisor = oservice_ref['hypervisor_type']
dest_hypervisor = dservice_ref['hypervisor_type']
if orig_hypervisor != dest_hypervisor:
- raise exception.Invalid(_("Different hypervisor type"
- "(%(orig_hypervisor)s->"
- "%(dest_hypervisor)s)')" % locals()))
+ raise exception.InvalidHypervisorType()
# Checkng hypervisor version.
orig_hypervisor = oservice_ref['hypervisor_version']
dest_hypervisor = dservice_ref['hypervisor_version']
if orig_hypervisor > dest_hypervisor:
- raise exception.Invalid(_("Older hypervisor version"
- "(%(orig_hypervisor)s->"
- "%(dest_hypervisor)s)") % locals())
+ raise exception.DestinationHypervisorTooOld()
# Checking cpuinfo.
try:
@@ -265,11 +255,9 @@ class Scheduler(object):
mem_avail = mem_total - mem_used
mem_inst = instance_ref['memory_mb']
if mem_avail <= mem_inst:
- raise exception.NotEmpty(_("Unable to migrate %(ec2_id)s "
- "to destination: %(dest)s "
- "(host:%(mem_avail)s "
- "<= instance:%(mem_inst)s)")
- % locals())
+ reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s "
+ "(host:%(mem_avail)s <= instance:%(mem_inst)s)")
+ raise exception.MigrationError(reason=reason % locals())
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
"""Check if the src and dest host mount same shared storage.
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
new file mode 100644
index 000000000..483f3225c
--- /dev/null
+++ b/nova/scheduler/host_filter.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Host Filter is a driver mechanism for requesting instance resources.
+Three drivers are included: AllHosts, Flavor & JSON. AllHosts just
+returns the full, unfiltered list of hosts. Flavor is a hard coded
+matching mechanism based on flavor criteria and JSON is an ad-hoc
+filter grammar.
+
+Why JSON? The requests for instances may come in through the
+REST interface from a user or a parent Zone.
+Currently Flavors and/or InstanceTypes are used for
+specifying the type of instance desired. Specific Nova users have
+noted a need for a more expressive way of specifying instances.
+Since we don't want to get into building a full DSL, this is a simple
+form as an example of how this could be done. In reality, most
+consumers will use the more rigid filters such as FlavorFilter.
+
+Note: These are "required" capability filters. The capabilities
+used must be present or the host will be excluded. The hosts
+returned are then weighed by the Weighted Scheduler. Weights
+can take the more esoteric factors into consideration (such as
+server affinity and customer separation).
+"""
+
+import json
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+
+LOG = logging.getLogger('nova.scheduler.host_filter')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('default_host_filter_driver',
+ 'nova.scheduler.host_filter.AllHostsFilter',
+ 'Which driver to use for filtering hosts.')
+
+
+class HostFilter(object):
+ """Base class for host filter drivers."""
+
+ def instance_type_to_filter(self, instance_type):
+ """Convert instance_type into a filter for most common use-case."""
+ raise NotImplementedError()
+
+ def filter_hosts(self, zone_manager, query):
+ """Return a list of hosts that fulfill the filter."""
+ raise NotImplementedError()
+
+ def _full_name(self):
+ """module.classname of the filter driver"""
+ return "%s.%s" % (self.__module__, self.__class__.__name__)
+
+
+class AllHostsFilter(HostFilter):
+ """NOP host filter driver. Returns all hosts in ZoneManager.
+ This essentially does what the old Scheduler+Chance used
+ to give us."""
+
+ def instance_type_to_filter(self, instance_type):
+ """Return anything to prevent base-class from raising
+ exception."""
+ return (self._full_name(), instance_type)
+
+ def filter_hosts(self, zone_manager, query):
+ """Return a list of hosts from ZoneManager list."""
+ return [(host, services)
+ for host, services in zone_manager.service_states.iteritems()]
+
+
+class FlavorFilter(HostFilter):
+ """HostFilter driver hard-coded to work with flavors."""
+
+ def instance_type_to_filter(self, instance_type):
+ """Use instance_type to filter hosts."""
+ return (self._full_name(), instance_type)
+
+ def filter_hosts(self, zone_manager, query):
+ """Return a list of hosts that can create instance_type."""
+ instance_type = query
+ selected_hosts = []
+ for host, services in zone_manager.service_states.iteritems():
+ capabilities = services.get('compute', {})
+ host_ram_mb = capabilities['host_memory_free']
+ disk_bytes = capabilities['disk_available']
+ if host_ram_mb >= instance_type['memory_mb'] and \
+ disk_bytes >= instance_type['local_gb']:
+ selected_hosts.append((host, capabilities))
+ return selected_hosts
+
+#host entries (currently) are like:
+# {'host_name-description': 'Default install of XenServer',
+# 'host_hostname': 'xs-mini',
+# 'host_memory_total': 8244539392,
+# 'host_memory_overhead': 184225792,
+# 'host_memory_free': 3868327936,
+# 'host_memory_free_computed': 3840843776},
+# 'host_other-config': {},
+# 'host_ip_address': '192.168.1.109',
+# 'host_cpu_info': {},
+# 'disk_available': 32954957824,
+# 'disk_total': 50394562560,
+# 'disk_used': 17439604736},
+# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
+# 'host_name-label': 'xs-mini'}
+
+# instance_type table has:
+#name = Column(String(255), unique=True)
+#memory_mb = Column(Integer)
+#vcpus = Column(Integer)
+#local_gb = Column(Integer)
+#flavorid = Column(Integer, unique=True)
+#swap = Column(Integer, nullable=False, default=0)
+#rxtx_quota = Column(Integer, nullable=False, default=0)
+#rxtx_cap = Column(Integer, nullable=False, default=0)
+
+
+class JsonFilter(HostFilter):
+ """Host Filter driver to allow simple JSON-based grammar for
+ selecting hosts."""
+
+ def _equals(self, args):
+ """First term is == all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs != rhs:
+ return False
+ return True
+
+ def _less_than(self, args):
+ """First term is < all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs >= rhs:
+ return False
+ return True
+
+ def _greater_than(self, args):
+ """First term is > all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs <= rhs:
+ return False
+ return True
+
+ def _in(self, args):
+ """First term is in set of remaining terms"""
+ if len(args) < 2:
+ return False
+ return args[0] in args[1:]
+
+ def _less_than_equal(self, args):
+ """First term is <= all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs > rhs:
+ return False
+ return True
+
+ def _greater_than_equal(self, args):
+ """First term is >= all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs < rhs:
+ return False
+ return True
+
+ def _not(self, args):
+ """Flip each of the arguments."""
+ if len(args) == 0:
+ return False
+ return [not arg for arg in args]
+
+ def _or(self, args):
+ """True if any arg is True."""
+ return True in args
+
+ def _and(self, args):
+ """True if all args are True."""
+ return False not in args
+
+ commands = {
+ '=': _equals,
+ '<': _less_than,
+ '>': _greater_than,
+ 'in': _in,
+ '<=': _less_than_equal,
+ '>=': _greater_than_equal,
+ 'not': _not,
+ 'or': _or,
+ 'and': _and,
+ }
+
+ def instance_type_to_filter(self, instance_type):
+ """Convert instance_type into JSON filter object."""
+ required_ram = instance_type['memory_mb']
+ required_disk = instance_type['local_gb']
+ query = ['and',
+ ['>=', '$compute.host_memory_free', required_ram],
+ ['>=', '$compute.disk_available', required_disk]
+ ]
+ return (self._full_name(), json.dumps(query))
+
+ def _parse_string(self, string, host, services):
+ """Strings prefixed with $ are capability lookups in the
+ form '$service.capability[.subcap*]'"""
+ if not string:
+ return None
+ if string[0] != '$':
+ return string
+
+ path = string[1:].split('.')
+ for item in path:
+ services = services.get(item, None)
+ if not services:
+ return None
+ return services
+
+ def _process_filter(self, zone_manager, query, host, services):
+ """Recursively parse the query structure."""
+ if len(query) == 0:
+ return True
+ cmd = query[0]
+ method = self.commands[cmd] # Let exception fly.
+ cooked_args = []
+ for arg in query[1:]:
+ if isinstance(arg, list):
+ arg = self._process_filter(zone_manager, arg, host, services)
+ elif isinstance(arg, basestring):
+ arg = self._parse_string(arg, host, services)
+ if arg != None:
+ cooked_args.append(arg)
+ result = method(self, cooked_args)
+ return result
+
+ def filter_hosts(self, zone_manager, query):
+ """Return a list of hosts that can fulfill filter."""
+ expanded = json.loads(query)
+ hosts = []
+ for host, services in zone_manager.service_states.iteritems():
+ r = self._process_filter(zone_manager, expanded, host, services)
+ if isinstance(r, list):
+ r = True in r
+ if r:
+ hosts.append((host, services))
+ return hosts
+
+
+DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter]
+
+
+def choose_driver(driver_name=None):
+ """Since the caller may specify which driver to use we need
+ to have an authoritative list of what is permissible. This
+ function checks the driver name against a predefined set
+ of acceptable drivers."""
+
+ if not driver_name:
+ driver_name = FLAGS.default_host_filter_driver
+ for driver in DRIVERS:
+ if "%s.%s" % (driver.__module__, driver.__name__) == driver_name:
+ return driver()
+ raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 7d62cfc4e..55cd7208b 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -60,10 +60,9 @@ class SchedulerManager(manager.Manager):
"""Get a list of zones from the ZoneManager."""
return self.zone_manager.get_zone_list()
- def get_zone_capabilities(self, context=None, service=None):
- """Get the normalized set of capabilites for this zone,
- or for a particular service."""
- return self.zone_manager.get_zone_capabilities(context, service)
+ def get_zone_capabilities(self, context=None):
+        """Get the normalized set of capabilities for this zone."""
+ return self.zone_manager.get_zone_capabilities(context)
def update_service_capabilities(self, context=None, service_name=None,
host=None, capabilities={}):
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index 198f9d4cc..3ddf6f3c3 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -106,28 +106,26 @@ class ZoneManager(object):
def __init__(self):
self.last_zone_db_check = datetime.min
self.zone_states = {} # { <zone_id> : ZoneState }
- self.service_states = {} # { <service> : { <host> : { cap k : v }}}
+ self.service_states = {} # { <host> : { <service> : { cap k : v }}}
self.green_pool = greenpool.GreenPool()
def get_zone_list(self):
"""Return the list of zones we know about."""
return [zone.to_dict() for zone in self.zone_states.values()]
- def get_zone_capabilities(self, context, service=None):
+ def get_zone_capabilities(self, context):
"""Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into
<cap>_min and <cap>_max values."""
- service_dict = self.service_states
- if service:
- service_dict = {service: self.service_states.get(service, {})}
+ hosts_dict = self.service_states
# TODO(sandy) - be smarter about fabricating this structure.
# But it's likely to change once we understand what the Best-Match
# code will need better.
combined = {} # { <service>_<cap> : (min, max), ... }
- for service_name, host_dict in service_dict.iteritems():
- for host, caps_dict in host_dict.iteritems():
- for cap, value in caps_dict.iteritems():
+ for host, host_dict in hosts_dict.iteritems():
+ for service_name, service_dict in host_dict.iteritems():
+ for cap, value in service_dict.iteritems():
key = "%s_%s" % (service_name, cap)
min_value, max_value = combined.get(key, (value, value))
min_value = min(min_value, value)
@@ -171,6 +169,6 @@ class ZoneManager(object):
"""Update the per-service capabilities based on this notification."""
logging.debug(_("Received %(service_name)s service update from "
"%(host)s: %(capabilities)s") % locals())
- service_caps = self.service_states.get(service_name, {})
- service_caps[host] = capabilities
- self.service_states[service_name] = service_caps
+ service_caps = self.service_states.get(host, {})
+ service_caps[service_name] = capabilities
+ self.service_states[host] = service_caps
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 8b0729c35..bf51239e6 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -228,6 +228,9 @@ class FakeToken(object):
# FIXME(sirp): let's not use id here
id = 0
+ def __getitem__(self, key):
+ return getattr(self, key)
+
def __init__(self, **kwargs):
FakeToken.id += 1
self.id = FakeToken.id
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 954d72adf..d1c62e454 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -47,8 +47,8 @@ def return_instance_types(context, num=2):
return instance_types
-def return_instance_type_not_found(context, flavorid):
- raise exception.NotFound()
+def return_instance_type_not_found(context, flavor_id):
+ raise exception.InstanceTypeNotFound(flavor_id=flavor_id)
class FlavorsTest(test.TestCase):
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index ae86d0686..2c329f920 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -75,6 +75,18 @@ class _BaseImageServiceTests(test.TestCase):
self.context,
'bad image id')
+ def test_create_and_show_non_existing_image_by_name(self):
+ fixture = self._make_fixture('test image')
+ num_images = len(self.service.index(self.context))
+
+ image_id = self.service.create(self.context, fixture)['id']
+
+ self.assertNotEquals(None, image_id)
+ self.assertRaises(exception.ImageNotFound,
+ self.service.show_by_name,
+ self.context,
+ 'bad image id')
+
def test_update(self):
fixture = self._make_fixture('test image')
image_id = self.service.create(self.context, fixture)['id']
@@ -538,7 +550,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 127,
- 'name': 'killed backup', 'serverId': 42,
+ 'name': 'killed backup',
+ 'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -584,7 +597,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 124,
'name': 'queued backup',
- 'serverId': 42,
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -606,7 +619,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 125,
'name': 'saving backup',
- 'serverId': 42,
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -629,7 +642,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 126,
'name': 'active backup',
- 'serverId': 42,
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -650,7 +663,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 127,
- 'name': 'killed backup', 'serverId': 42,
+ 'name': 'killed backup',
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index df367005d..45bd4d501 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -28,15 +28,14 @@ import webob
from xml.dom.minidom import parseString
from nova.api.openstack import limits
-from nova.api.openstack.limits import Limit
TEST_LIMITS = [
- Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
- Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
- Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE),
- Limit("PUT", "*", "", 10, limits.PER_MINUTE),
- Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE),
+ limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
+ limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
+ limits.Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE),
+ limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
+ limits.Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE),
]
@@ -58,15 +57,15 @@ class BaseLimitTestSuite(unittest.TestCase):
return self.time
-class LimitsControllerTest(BaseLimitTestSuite):
+class LimitsControllerV10Test(BaseLimitTestSuite):
"""
- Tests for `limits.LimitsController` class.
+ Tests for `limits.LimitsControllerV10` class.
"""
def setUp(self):
"""Run before each test."""
BaseLimitTestSuite.setUp(self)
- self.controller = limits.LimitsController()
+ self.controller = limits.LimitsControllerV10()
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
@@ -81,8 +80,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
def _populate_limits(self, request):
"""Put limit info into a request."""
_limits = [
- Limit("GET", "*", ".*", 10, 60).display(),
- Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
]
request.environ["nova.limits"] = _limits
return request
@@ -171,6 +170,100 @@ class LimitsControllerTest(BaseLimitTestSuite):
self.assertEqual(expected.toxml(), body.toxml())
+class LimitsControllerV11Test(BaseLimitTestSuite):
+ """
+ Tests for `limits.LimitsControllerV11` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ BaseLimitTestSuite.setUp(self)
+ self.controller = limits.LimitsControllerV11()
+
+ def _get_index_request(self, accept_header="application/json"):
+ """Helper to set routing arguments."""
+ request = webob.Request.blank("/")
+ request.accept = accept_header
+ request.environ["wsgiorg.routing_args"] = (None, {
+ "action": "index",
+ "controller": "",
+ })
+ return request
+
+ def _populate_limits(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ limits.Limit("GET", "changes-since*", "changes-since",
+ 5, 60).display(),
+ ]
+ request.environ["nova.limits"] = _limits
+ return request
+
+ def test_empty_index_json(self):
+ """Test getting empty limit details in JSON."""
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ body = json.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_index_json(self):
+ """Test getting limit details in JSON."""
+ request = self._get_index_request()
+ request = self._populate_limits(request)
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [
+ {
+ "regex": ".*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": 0,
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ {
+ "verb": "POST",
+ "next-available": 0,
+ "unit": "HOUR",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+ {
+ "regex": "changes-since",
+ "uri": "changes-since*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": 0,
+ "unit": "MINUTE",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+
+ ],
+ "absolute": {},
+ },
+ }
+ body = json.loads(response.body)
+ self.assertEqual(expected, body)
+
+
class LimitMiddlewareTest(BaseLimitTestSuite):
"""
Tests for the `limits.RateLimitingMiddleware` class.
@@ -185,7 +278,7 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
"""Prepare middleware for use through fake WSGI app."""
BaseLimitTestSuite.setUp(self)
_limits = [
- Limit("GET", "*", ".*", 1, 60),
+ limits.Limit("GET", "*", ".*", 1, 60),
]
self.app = limits.RateLimitingMiddleware(self._empty_app, _limits)
@@ -238,7 +331,7 @@ class LimitTest(BaseLimitTestSuite):
def test_GET_no_delay(self):
"""Test a limit handles 1 GET per second."""
- limit = Limit("GET", "*", ".*", 1, 1)
+ limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
self.assertEqual(0, limit.next_request)
@@ -246,7 +339,7 @@ class LimitTest(BaseLimitTestSuite):
def test_GET_delay(self):
"""Test two calls to 1 GET per second limit."""
- limit = Limit("GET", "*", ".*", 1, 1)
+ limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 556046e9d..e8182b6a9 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -33,6 +33,7 @@ import nova.api.openstack
from nova.api.openstack import servers
import nova.compute.api
from nova.compute import instance_types
+from nova.compute import power_state
import nova.db.api
from nova.db.sqlalchemy.models import Instance
from nova.db.sqlalchemy.models import InstanceMetadata
@@ -56,6 +57,12 @@ def return_server_with_addresses(private, public):
return _return_server
+def return_server_with_power_state(power_state):
+ def _return_server(context, id):
+ return stub_instance(id, power_state=power_state)
+ return _return_server
+
+
def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)]
@@ -73,7 +80,7 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
- host=None):
+ host=None, power_state=0):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@@ -96,7 +103,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": 0,
+ "state": power_state,
"state_description": "",
"memory_mb": 0,
"vcpus": 0,
@@ -127,6 +134,10 @@ def fake_compute_api(cls, req, id):
return True
+def find_host(self, context, instance_id):
+ return "nova"
+
+
class ServersTest(test.TestCase):
def setUp(self):
@@ -172,7 +183,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
- def test_get_server_by_id_v11(self):
+ def test_get_server_by_id_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
@@ -235,7 +246,7 @@ class ServersTest(test.TestCase):
self.assertEqual(len(addresses["private"]), 1)
self.assertEqual(addresses["private"][0], private)
- def test_get_server_addresses_V10(self):
+ def test_get_server_addresses_v1_0(self):
private = '192.168.0.3'
public = ['1.2.3.4']
new_return_server = return_server_with_addresses(private, public)
@@ -246,7 +257,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict, {
'addresses': {'public': public, 'private': [private]}})
- def test_get_server_addresses_xml_V10(self):
+ def test_get_server_addresses_xml_v1_0(self):
private_expected = "192.168.0.3"
public_expected = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private_expected,
@@ -265,7 +276,7 @@ class ServersTest(test.TestCase):
(ip,) = private.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private_expected)
- def test_get_server_addresses_public_V10(self):
+ def test_get_server_addresses_public_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -275,7 +286,7 @@ class ServersTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {'public': public})
- def test_get_server_addresses_private_V10(self):
+ def test_get_server_addresses_private_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -285,7 +296,7 @@ class ServersTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res_dict, {'private': [private]})
- def test_get_server_addresses_public_xml_V10(self):
+ def test_get_server_addresses_public_xml_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -299,7 +310,7 @@ class ServersTest(test.TestCase):
(ip,) = public_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), public[0])
- def test_get_server_addresses_private_xml_V10(self):
+ def test_get_server_addresses_private_xml_v1_0(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -313,7 +324,7 @@ class ServersTest(test.TestCase):
(ip,) = private_node.getElementsByTagName('ip')
self.assertEquals(ip.getAttribute('addr'), private)
- def test_get_server_by_id_with_addresses_v11(self):
+ def test_get_server_by_id_with_addresses_v1_1(self):
private = "192.168.0.3"
public = ["1.2.3.4"]
new_return_server = return_server_with_addresses(private, public)
@@ -343,7 +354,7 @@ class ServersTest(test.TestCase):
self.assertEqual(s.get('imageId', None), None)
i += 1
- def test_get_server_list_v11(self):
+ def test_get_server_list_v1_1(self):
req = webob.Request.blank('/v1.1/servers')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
@@ -466,6 +477,7 @@ class ServersTest(test.TestCase):
"_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
self.stubs.Set(nova.api.openstack.common,
"get_image_id_from_image_hash", image_id_from_hash)
+ self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
def _test_create_instance_helper(self):
self._setup_for_create_instance()
@@ -564,16 +576,16 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_create_instance_v11(self):
+ def test_create_instance_v1_1(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/2'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/2'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': imageRef,
- 'flavorRef': flavorRef,
+ 'imageRef': image_ref,
+ 'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
@@ -593,17 +605,17 @@ class ServersTest(test.TestCase):
self.assertEqual(16, len(server['adminPass']))
self.assertEqual('server_test', server['name'])
self.assertEqual(1, server['id'])
- self.assertEqual(flavorRef, server['flavorRef'])
- self.assertEqual(imageRef, server['imageRef'])
+ self.assertEqual(flavor_ref, server['flavorRef'])
+ self.assertEqual(image_ref, server['imageRef'])
self.assertEqual(res.status_int, 200)
- def test_create_instance_v11_bad_href(self):
+ def test_create_instance_v1_1_bad_href(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/asdf'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/asdf'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = dict(server=dict(
- name='server_test', imageRef=imageRef, flavorRef=flavorRef,
+ name='server_test', imageRef=image_ref, flavorRef=flavor_ref,
metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.1/servers')
@@ -613,7 +625,34 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_create_instance_with_admin_pass_v10(self):
+ def test_create_instance_v1_1_local_href(self):
+ self._setup_for_create_instance()
+
+ image_ref = 'http://localhost/v1.1/images/2'
+ image_ref_local = '2'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': image_ref_local,
+ 'flavorRef': flavor_ref,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+
+ server = json.loads(res.body)['server']
+ self.assertEqual(1, server['id'])
+ self.assertEqual(flavor_ref, server['flavorRef'])
+ self.assertEqual(image_ref, server['imageRef'])
+ self.assertEqual(res.status_int, 200)
+
+ def test_create_instance_with_admin_pass_v1_0(self):
self._setup_for_create_instance()
body = {
@@ -634,16 +673,16 @@ class ServersTest(test.TestCase):
self.assertNotEqual(res['server']['adminPass'],
body['server']['adminPass'])
- def test_create_instance_with_admin_pass_v11(self):
+ def test_create_instance_with_admin_pass_v1_1(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/2'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/2'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': imageRef,
- 'flavorRef': flavorRef,
+ 'imageRef': image_ref,
+ 'flavorRef': flavor_ref,
'adminPass': 'testpass',
},
}
@@ -656,16 +695,16 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(server['adminPass'], body['server']['adminPass'])
- def test_create_instance_with_empty_admin_pass_v11(self):
+ def test_create_instance_with_empty_admin_pass_v1_1(self):
self._setup_for_create_instance()
- imageRef = 'http://localhost/v1.1/images/2'
- flavorRef = 'http://localhost/v1.1/flavors/3'
+ image_ref = 'http://localhost/v1.1/images/2'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': imageRef,
- 'flavorRef': flavorRef,
+ 'imageRef': image_ref,
+ 'flavorRef': flavor_ref,
'adminPass': '',
},
}
@@ -719,7 +758,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
- def test_update_server_v10(self):
+ def test_update_server_v1_0(self):
inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
@@ -733,6 +772,7 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
+ self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
@@ -741,7 +781,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 204)
- def test_update_server_adminPass_ignored_v11(self):
+ def test_update_server_adminPass_ignored_v1_1(self):
inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
@@ -782,7 +822,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 501)
- def test_server_backup_schedule_deprecated_v11(self):
+ def test_server_backup_schedule_deprecated_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1/backup_schedule')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
@@ -1024,15 +1064,175 @@ class ServersTest(test.TestCase):
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
- def test_server_rebuild(self):
- body = dict(server=dict(
- name='server_test', imageId=2, flavorId=2, metadata={},
- personality={}))
+ def test_server_rebuild_accepted(self):
+ body = {
+ "rebuild": {
+ "imageId": 2,
+ },
+ }
+
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(res.body, "")
+
+ def test_server_rebuild_rejected_when_building(self):
+ body = {
+ "rebuild": {
+ "imageId": 2,
+ },
+ }
+
+ state = power_state.BUILDING
+ new_return_server = return_server_with_power_state(state)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 409)
+
+ def test_server_rebuild_bad_entity(self):
+ body = {
+ "rebuild": {
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_rebuild_accepted_minimum_v1_1(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
+ def test_server_rebuild_rejected_when_building_v1_1(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ },
+ }
+
+ state = power_state.BUILDING
+ new_return_server = return_server_with_power_state(state)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 409)
+
+ def test_server_rebuild_accepted_with_metadata_v1_1(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "metadata": {
+ "new": "metadata",
+ },
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
+ def test_server_rebuild_accepted_with_bad_metadata_v1_1(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "metadata": "stack",
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_rebuild_bad_entity_v1_1(self):
+ body = {
+ "rebuild": {
+ "imageId": 2,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_rebuild_bad_personality_v1_1(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "personality": [{
+ "path": "/path/to/file",
+ "contents": "INVALID b64",
+ }]
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_rebuild_personality_v1_1(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "personality": [{
+ "path": "/path/to/file",
+ "contents": base64.b64encode("Test String"),
+ }]
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
def test_delete_server_instance(self):
req = webob.Request.blank('/v1.0/servers/1')
@@ -1155,6 +1355,24 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+ def test_shutdown_status(self):
+ new_server = return_server_with_power_state(power_state.SHUTDOWN)
+ self.stubs.Set(nova.db.api, 'instance_get', new_server)
+ req = webob.Request.blank('/v1.0/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['status'], 'SHUTDOWN')
+
+ def test_shutoff_status(self):
+ new_server = return_server_with_power_state(power_state.SHUTOFF)
+ self.stubs.Set(nova.db.api, 'instance_get', new_server)
+ req = webob.Request.blank('/v1.0/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['status'], 'SHUTOFF')
+
class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
@@ -1436,6 +1654,19 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
request = self.deserializer.deserialize(serial_request)
self.assertEqual(request, expected)
+ def test_request_xmlser_with_flavor_image_ref(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="http://localhost:8774/v1.1/images/1"
+ flavorRef="http://localhost:8774/v1.1/flavors/1">
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ self.assertEquals(request["server"]["flavorRef"],
+ "http://localhost:8774/v1.1/flavors/1")
+ self.assertEquals(request["server"]["imageRef"],
+ "http://localhost:8774/v1.1/images/1")
+
class TestServerInstanceCreation(test.TestCase):
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index a3f191aaa..5d5799b59 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -75,7 +75,7 @@ def zone_get_all_db(context):
]
-def zone_capabilities(method, context, params):
+def zone_capabilities(method, context):
return dict()
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 1ecdd1cfb..5820ecdc2 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -136,6 +136,12 @@ class RequestTest(test.TestCase):
request.body = "asdf<br />"
self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+ def test_request_content_type_with_charset(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "application/json; charset=UTF-8"
+ result = request.get_content_type()
+ self.assertEqual(result, "application/json")
+
def test_content_type_from_accept_xml(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index 58d251b1e..8bdea359a 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -124,7 +124,6 @@ def stub_out_db_instance_api(stubs, injected=True):
return FakeModel(vlan_network_fields)
else:
return FakeModel(flat_network_fields)
- return FakeModel(network_fields)
def fake_network_get_all_by_instance(context, instance_id):
# Even instance numbers are on vlan networks
diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py
index 988a1de72..b06271c99 100644
--- a/nova/tests/network/base.py
+++ b/nova/tests/network/base.py
@@ -25,6 +25,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import ipv6
from nova import log as logging
from nova import test
from nova import utils
@@ -117,15 +118,15 @@ class NetworkTestCase(test.TestCase):
context.get_admin_context(),
instance_ref['id'])
self.assertEqual(instance_ref['mac_address'],
- utils.to_mac(address_v6))
+ ipv6.to_mac(address_v6))
instance_ref2 = db.fixed_ip_get_instance_v6(
context.get_admin_context(),
address_v6)
self.assertEqual(instance_ref['id'], instance_ref2['id'])
self.assertEqual(address_v6,
- utils.to_global_ipv6(
- network_ref['cidr_v6'],
- instance_ref['mac_address']))
+ ipv6.to_global(network_ref['cidr_v6'],
+ instance_ref['mac_address'],
+ 'test'))
self._deallocate_address(0, address)
db.instance_destroy(context.get_admin_context(),
instance_ref['id'])
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index fa0e56597..97f401b87 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -28,10 +28,12 @@ import StringIO
import webob
from nova import context
+from nova import exception
from nova import test
from nova.api import ec2
-from nova.api.ec2 import cloud
from nova.api.ec2 import apirequest
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
from nova.auth import manager
@@ -101,6 +103,21 @@ class XmlConversionTestCase(test.TestCase):
self.assertEqual(conv('-0'), 0)
+class Ec2utilsTestCase(test.TestCase):
+ def test_ec2_id_to_id(self):
+ self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
+ self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
+
+ def test_bad_ec2_id(self):
+ self.assertRaises(exception.InvalidEc2Id,
+ ec2utils.ec2_id_to_id,
+ 'badone')
+
+ def test_id_to_ec2_id(self):
+ self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
+ self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
+
+
class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index f8a1b1564..f02dd94b7 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -101,9 +101,43 @@ class _AuthManagerBaseTestCase(test.TestCase):
self.assertEqual('private-party', u.access)
def test_004_signature_is_valid(self):
- #self.assertTrue(self.manager.authenticate(**boto.generate_url ...? ))
- pass
- #raise NotImplementedError
+ with user_generator(self.manager, name='admin', secret='admin',
+ access='admin'):
+ with project_generator(self.manager, name="admin",
+ manager_user='admin'):
+ accesskey = 'admin:admin'
+ expected_result = (self.manager.get_user('admin'),
+ self.manager.get_project('admin'))
+ # captured sig and query string using boto 1.9b/euca2ools 1.2
+ sig = 'd67Wzd9Bwz8xid9QU+lzWXcF2Y3tRicYABPJgrqfrwM='
+ auth_params = {'AWSAccessKeyId': 'admin:admin',
+ 'Action': 'DescribeAvailabilityZones',
+ 'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'Timestamp': '2011-04-22T11:29:29',
+ 'Version': '2009-11-30'}
+ self.assertTrue(expected_result, self.manager.authenticate(
+ accesskey,
+ sig,
+ auth_params,
+ 'GET',
+ '127.0.0.1:8773',
+ '/services/Cloud/'))
+ # captured sig and query string using RightAWS 1.10.0
+ sig = 'ECYLU6xdFG0ZqRVhQybPJQNJ5W4B9n8fGs6+/fuGD2c='
+ auth_params = {'AWSAccessKeyId': 'admin:admin',
+ 'Action': 'DescribeAvailabilityZones',
+ 'SignatureMethod': 'HmacSHA256',
+ 'SignatureVersion': '2',
+ 'Timestamp': '2011-04-22T11:29:49.000Z',
+ 'Version': '2008-12-01'}
+ self.assertTrue(expected_result, self.manager.authenticate(
+ accesskey,
+ sig,
+ auth_params,
+ 'GET',
+ '127.0.0.1',
+ '/services/Cloud'))
def test_005_can_get_credentials(self):
return
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index c45bdd12c..c8559615a 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -279,6 +279,26 @@ class CloudTestCase(test.TestCase):
user_group=['all'])
self.assertEqual(True, result['is_public'])
+ def test_deregister_image(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(local.LocalImageService, 'delete', fake_delete)
+ # valid image
+ result = deregister_image(self.context, 'ami-00000001')
+ self.assertEqual(result['imageId'], 'ami-00000001')
+ # invalid image
+ self.stubs.UnsetAll()
+
+ def fake_detail_empty(self, context):
+ return []
+
+ self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty)
+ self.assertRaises(exception.ImageNotFound, deregister_image,
+ self.context, 'ami-bad001')
+
def test_console_output(self):
instance_type = FLAGS.default_instance_type
max_count = 1
@@ -290,7 +310,7 @@ class CloudTestCase(test.TestCase):
instance_id = rv['instancesSet'][0]['instanceId']
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
- self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
+ self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
@@ -338,41 +358,6 @@ class CloudTestCase(test.TestCase):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
- def test_run_instances(self):
- if FLAGS.connection_type == 'fake':
- LOG.debug(_("Can't test instances without a real virtual env."))
- return
- image_id = FLAGS.default_image
- instance_type = FLAGS.default_instance_type
- max_count = 1
- kwargs = {'image_id': image_id,
- 'instance_type': instance_type,
- 'max_count': max_count}
- rv = self.cloud.run_instances(self.context, **kwargs)
- # TODO: check for proper response
- instance_id = rv['reservationSet'][0].keys()[0]
- instance = rv['reservationSet'][0][instance_id][0]
- LOG.debug(_("Need to watch instance %s until it's running..."),
- instance['instance_id'])
- while True:
- greenthread.sleep(1)
- info = self.cloud._get_instance(instance['instance_id'])
- LOG.debug(info['state'])
- if info['state'] == power_state.RUNNING:
- break
- self.assert_(rv)
-
- if FLAGS.connection_type != 'fake':
- time.sleep(45) # Should use boto for polling here
- for reservations in rv['reservationSet']:
- # for res_id in reservations.keys():
- # LOG.debug(reservations[res_id])
- # for instance in reservations[res_id]:
- for instance in reservations[reservations.keys()[0]]:
- instance_id = instance['instance_id']
- LOG.debug(_("Terminating instance %s"), instance_id)
- rv = self.compute.terminate_instance(instance_id)
-
def test_terminate_instances(self):
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_id': 1,
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 393110791..9170837b6 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -21,6 +21,7 @@ Tests For Compute
import datetime
import mox
+import stubout
from nova import compute
from nova import context
@@ -52,6 +53,10 @@ class FakeTime(object):
self.counter += t
+def nop_report_driver_status(self):
+ pass
+
+
class ComputeTestCase(test.TestCase):
"""Test case for compute"""
def setUp(self):
@@ -329,6 +334,28 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
+ def test_finish_resize(self):
+ """Contrived test to ensure finish_resize doesn't raise anything"""
+
+ def fake(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.compute.driver, 'finish_resize', fake)
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+ self.compute.prep_resize(context, instance_id, 1)
+ migration_ref = db.migration_get_by_instance_and_status(context,
+ instance_id, 'pre-migrating')
+ try:
+ self.compute.finish_resize(context, instance_id,
+ int(migration_ref['id']), {})
+ except KeyError, e:
+ # Only catch key errors. We want other reasons for the test to
+ # fail to actually error out so we don't obscure anything
+ self.fail()
+
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()
@@ -649,6 +676,10 @@ class ComputeTestCase(test.TestCase):
def test_run_kill_vm(self):
"""Detect when a vm is terminated behind the scenes"""
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(compute_manager.ComputeManager,
+ '_report_driver_status', nop_report_driver_status)
+
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py
new file mode 100644
index 000000000..4d3b9cc73
--- /dev/null
+++ b/nova/tests/test_exception.py
@@ -0,0 +1,34 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova import exception
+
+
+class ApiErrorTestCase(test.TestCase):
+ def test_return_valid_error(self):
+ # without 'code' arg
+ err = exception.ApiError('fake error')
+ self.assertEqual(err.__str__(), 'fake error')
+ self.assertEqual(err.code, None)
+ self.assertEqual(err.msg, 'fake error')
+ # with 'code' arg
+ err = exception.ApiError('fake error', 'blah code')
+ self.assertEqual(err.__str__(), 'blah code: fake error')
+ self.assertEqual(err.code, 'blah code')
+ self.assertEqual(err.msg, 'fake error')
diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py
new file mode 100644
index 000000000..c029d41e6
--- /dev/null
+++ b/nova/tests/test_host_filter.py
@@ -0,0 +1,208 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filter Drivers.
+"""
+
+import json
+
+from nova import exception
+from nova import flags
+from nova import test
+from nova.scheduler import host_filter
+
+FLAGS = flags.FLAGS
+
+
+class FakeZoneManager:
+ pass
+
+
+class HostFilterTestCase(test.TestCase):
+ """Test case for host filter drivers."""
+
+ def _host_caps(self, multiplier):
+ # Returns host capabilities in the following way:
+ # host1 = memory:free 10 (100max)
+ # disk:available 100 (1000max)
+ # hostN = memory:free 10 + 10N
+ # disk:available 100 + 100N
+ # in other words: hostN has more resources than host0
+ # which means ... don't go above 10 hosts.
+ return {'host_name-description': 'XenServer %s' % multiplier,
+ 'host_hostname': 'xs-%s' % multiplier,
+ 'host_memory_total': 100,
+ 'host_memory_overhead': 10,
+ 'host_memory_free': 10 + multiplier * 10,
+ 'host_memory_free-computed': 10 + multiplier * 10,
+ 'host_other-config': {},
+ 'host_ip_address': '192.168.1.%d' % (100 + multiplier),
+ 'host_cpu_info': {},
+ 'disk_available': 100 + multiplier * 100,
+ 'disk_total': 1000,
+ 'disk_used': 0,
+ 'host_uuid': 'xxx-%d' % multiplier,
+ 'host_name-label': 'xs-%s' % multiplier}
+
+ def setUp(self):
+ self.old_flag = FLAGS.default_host_filter_driver
+ FLAGS.default_host_filter_driver = \
+ 'nova.scheduler.host_filter.AllHostsFilter'
+ self.instance_type = dict(name='tiny',
+ memory_mb=50,
+ vcpus=10,
+ local_gb=500,
+ flavorid=1,
+ swap=500,
+ rxtx_quota=30000,
+ rxtx_cap=200)
+
+ self.zone_manager = FakeZoneManager()
+ states = {}
+ for x in xrange(10):
+ states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
+ self.zone_manager.service_states = states
+
+ def tearDown(self):
+ FLAGS.default_host_filter_driver = self.old_flag
+
+ def test_choose_driver(self):
+ # Test default driver ...
+ driver = host_filter.choose_driver()
+ self.assertEquals(driver._full_name(),
+ 'nova.scheduler.host_filter.AllHostsFilter')
+ # Test valid driver ...
+ driver = host_filter.choose_driver(
+ 'nova.scheduler.host_filter.FlavorFilter')
+ self.assertEquals(driver._full_name(),
+ 'nova.scheduler.host_filter.FlavorFilter')
+ # Test invalid driver ...
+ try:
+ host_filter.choose_driver('does not exist')
+ self.fail("Should not find driver")
+ except exception.SchedulerHostFilterDriverNotFound:
+ pass
+
+ def test_all_host_driver(self):
+ driver = host_filter.AllHostsFilter()
+ cooked = driver.instance_type_to_filter(self.instance_type)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(10, len(hosts))
+ for host, capabilities in hosts:
+ self.assertTrue(host.startswith('host'))
+
+ def test_flavor_driver(self):
+ driver = host_filter.FlavorFilter()
+ # filter all hosts that can support 50 ram and 500 disk
+ name, cooked = driver.instance_type_to_filter(self.instance_type)
+ self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(6, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ self.assertEquals('host05', just_hosts[0])
+ self.assertEquals('host10', just_hosts[5])
+
+ def test_json_driver(self):
+ driver = host_filter.JsonFilter()
+ # filter all hosts that can support 50 ram and 500 disk
+ name, cooked = driver.instance_type_to_filter(self.instance_type)
+ self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(6, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ self.assertEquals('host05', just_hosts[0])
+ self.assertEquals('host10', just_hosts[5])
+
+ # Try some custom queries
+
+ raw = ['or',
+ ['and',
+ ['<', '$compute.host_memory_free', 30],
+ ['<', '$compute.disk_available', 300]
+ ],
+ ['and',
+ ['>', '$compute.host_memory_free', 70],
+ ['>', '$compute.disk_available', 700]
+ ]
+ ]
+ cooked = json.dumps(raw)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+
+ self.assertEquals(5, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([1, 2, 8, 9, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ raw = ['not',
+ ['=', '$compute.host_memory_free', 30],
+ ]
+ cooked = json.dumps(raw)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+
+ self.assertEquals(9, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
+ cooked = json.dumps(raw)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+
+ self.assertEquals(5, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([2, 4, 6, 8, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ # Try some bogus input ...
+ raw = ['unknown command', ]
+ cooked = json.dumps(raw)
+ try:
+ driver.filter_hosts(self.zone_manager, cooked)
+ self.fail("Should give KeyError")
+ except KeyError, e:
+ pass
+
+ self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
+ self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
+ self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['not', True, False, True, False]
+ )))
+
+ try:
+ driver.filter_hosts(self.zone_manager, json.dumps(
+ 'not', True, False, True, False
+ ))
+ self.fail("Should give KeyError")
+ except KeyError, e:
+ pass
+
+ self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['=', '$foo', 100]
+ )))
+ self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['=', '$.....', 100]
+ )))
+ self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
+ )))
+
+ self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['=', {}, ['>', '$missing....foo']]
+ )))
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 5d6d5e1f4..ef271518c 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -75,16 +75,25 @@ class InstanceTypeTestCase(test.TestCase):
def test_invalid_create_args_should_fail(self):
"""Ensures that instance type creation fails with invalid args"""
self.assertRaises(
- exception.InvalidInputException,
+ exception.InvalidInput,
instance_types.create, self.name, 0, 1, 120, self.flavorid)
self.assertRaises(
- exception.InvalidInputException,
+ exception.InvalidInput,
instance_types.create, self.name, 256, -1, 120, self.flavorid)
self.assertRaises(
- exception.InvalidInputException,
+ exception.InvalidInput,
instance_types.create, self.name, 256, 1, "aa", self.flavorid)
def test_non_existant_inst_type_shouldnt_delete(self):
"""Ensures that instance type creation fails with invalid args"""
self.assertRaises(exception.ApiError,
instance_types.destroy, "sfsfsdfdfs")
+
+ def test_repeated_inst_types_should_raise_api_error(self):
+ """Ensures that instance duplicates raises ApiError"""
+ new_name = self.name + "dup"
+ instance_types.create(new_name, 256, 1, 120, self.flavorid + 1)
+ instance_types.destroy(new_name)
+ self.assertRaises(
+ exception.ApiError,
+ instance_types.create, new_name, 256, 1, 120, self.flavorid)
diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py
new file mode 100644
index 000000000..11dc2ec98
--- /dev/null
+++ b/nova/tests/test_ipv6.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test suite for IPv6."""
+
+from nova import flags
+from nova import ipv6
+from nova import log as logging
+from nova import test
+
+LOG = logging.getLogger('nova.tests.test_ipv6')
+
+FLAGS = flags.FLAGS
+
+import sys
+
+
+class IPv6RFC2462TestCase(test.TestCase):
+ """Unit tests for IPv6 rfc2462 backend operations."""
+ def setUp(self):
+ super(IPv6RFC2462TestCase, self).setUp()
+ self.flags(ipv6_backend='rfc2462')
+ ipv6.reset_backend()
+
+ def test_to_global(self):
+ addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test')
+ self.assertEquals(addr, '2001:db8::16:3eff:fe33:4455')
+
+ def test_to_mac(self):
+ mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455')
+ self.assertEquals(mac, '00:16:3e:33:44:55')
+
+
+class IPv6AccountIdentiferTestCase(test.TestCase):
+ """Unit tests for IPv6 account_identifier backend operations."""
+ def setUp(self):
+ super(IPv6AccountIdentiferTestCase, self).setUp()
+ self.flags(ipv6_backend='account_identifier')
+ ipv6.reset_backend()
+
+ def test_to_global(self):
+ addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test')
+ self.assertEquals(addr, '2001:db8::a94a:8fe5:ff33:4455')
+
+ def test_to_mac(self):
+ mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455')
+ self.assertEquals(mac, '02:16:3e:33:44:55')
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index 4e17e1ce0..cf8f4c05e 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -29,11 +29,12 @@ from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
- if os.path.exists(os.path.join(topdir, '.bzr')):
- contributors = set()
-
- mailmap = parse_mailmap(os.path.join(topdir, '.mailmap'))
+ missing = set()
+ contributors = set()
+ mailmap = parse_mailmap(os.path.join(topdir, '.mailmap'))
+ authors_file = open(os.path.join(topdir, 'Authors'), 'r').read()
+ if os.path.exists(os.path.join(topdir, '.bzr')):
import bzrlib.workingtree
tree = bzrlib.workingtree.WorkingTree.open(topdir)
tree.lock_read()
@@ -47,22 +48,36 @@ class ProjectTestCase(test.TestCase):
for r in revs:
for author in r.get_apparent_authors():
email = author.split(' ')[-1]
- contributors.add(str_dict_replace(email, mailmap))
+ contributors.add(str_dict_replace(email,
+ mailmap))
+ finally:
+ tree.unlock()
- authors_file = open(os.path.join(topdir, 'Authors'),
- 'r').read()
+ elif os.path.exists(os.path.join(topdir, '.git')):
+ import git
+ repo = git.Repo(topdir)
+ for commit in repo.head.commit.iter_parents():
+ email = commit.author.email
+ if email is None:
+ email = commit.author.name
+ if 'nova-core' in email:
+ continue
+ if email.split(' ')[-1] == '<>':
+ email = email.split(' ')[-2]
+ email = '<' + email + '>'
+ contributors.add(str_dict_replace(email, mailmap))
- missing = set()
- for contributor in contributors:
- if contributor == 'nova-core':
- continue
- if not contributor in authors_file:
- missing.add(contributor)
+ else:
+ return
- self.assertTrue(len(missing) == 0,
- '%r not listed in Authors' % missing)
- finally:
- tree.unlock()
+ for contributor in contributors:
+ if contributor == 'nova-core':
+ continue
+ if not contributor in authors_file:
+ missing.add(contributor)
+
+ self.assertTrue(len(missing) == 0,
+ '%r not listed in Authors' % missing)
class LockTestCase(test.TestCase):
diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py
new file mode 100644
index 000000000..b6b0fcc68
--- /dev/null
+++ b/nova/tests/test_notifier.py
@@ -0,0 +1,117 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import nova
+
+from nova import context
+from nova import flags
+from nova import rpc
+import nova.notifier.api
+from nova.notifier.api import notify
+from nova.notifier import no_op_notifier
+from nova.notifier import rabbit_notifier
+from nova import test
+
+import stubout
+
+
+class NotifierTestCase(test.TestCase):
+ """Test case for notifications"""
+ def setUp(self):
+ super(NotifierTestCase, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(NotifierTestCase, self).tearDown()
+
+ def test_send_notification(self):
+ self.notify_called = False
+
+ def mock_notify(cls, *args):
+ self.notify_called = True
+
+ self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
+ mock_notify)
+
+ class Mock(object):
+ pass
+ notify('publisher_id', 'event_type',
+ nova.notifier.api.WARN, dict(a=3))
+ self.assertEqual(self.notify_called, True)
+
+ def test_verify_message_format(self):
+ """A test to ensure changing the message format is prohibitively
+ annoying"""
+
+ def message_assert(message):
+ fields = [('publisher_id', 'publisher_id'),
+ ('event_type', 'event_type'),
+ ('priority', 'WARN'),
+ ('payload', dict(a=3))]
+ for k, v in fields:
+ self.assertEqual(message[k], v)
+ self.assertTrue(len(message['message_id']) > 0)
+ self.assertTrue(len(message['timestamp']) > 0)
+
+ self.stubs.Set(nova.notifier.no_op_notifier, 'notify',
+ message_assert)
+ notify('publisher_id', 'event_type',
+ nova.notifier.api.WARN, dict(a=3))
+
+ def test_send_rabbit_notification(self):
+ self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+ 'nova.notifier.rabbit_notifier')
+ self.mock_cast = False
+
+ def mock_cast(cls, *args):
+ self.mock_cast = True
+
+ class Mock(object):
+ pass
+
+ self.stubs.Set(nova.rpc, 'cast', mock_cast)
+ notify('publisher_id', 'event_type',
+ nova.notifier.api.WARN, dict(a=3))
+
+ self.assertEqual(self.mock_cast, True)
+
+ def test_invalid_priority(self):
+ def mock_cast(cls, *args):
+ pass
+
+ class Mock(object):
+ pass
+
+ self.stubs.Set(nova.rpc, 'cast', mock_cast)
+ self.assertRaises(nova.notifier.api.BadPriorityException,
+ notify, 'publisher_id',
+ 'event_type', 'not a priority', dict(a=3))
+
+ def test_rabbit_priority_queue(self):
+ self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+ 'nova.notifier.rabbit_notifier')
+ self.stubs.Set(nova.flags.FLAGS, 'notification_topic',
+ 'testnotify')
+
+ self.test_topic = None
+
+ def mock_cast(context, topic, msg):
+ self.test_topic = topic
+
+ self.stubs.Set(nova.rpc, 'cast', mock_cast)
+ notify('publisher_id',
+ 'event_type', 'DEBUG', dict(a=3))
+ self.assertEqual(self.test_topic, 'testnotify.debug')
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 86f75c880..b03f74111 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -95,12 +95,11 @@ class QuotaTestCase(test.TestCase):
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 2)
- db.quota_create(self.context, {'project_id': self.project.id,
- 'instances': 10})
+ db.quota_create(self.context, self.project.id, 'instances', 10)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 4)
- db.quota_update(self.context, self.project.id, {'cores': 100})
+ db.quota_create(self.context, self.project.id, 'cores', 100)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 10)
@@ -110,13 +109,85 @@ class QuotaTestCase(test.TestCase):
num_metadata_items = quota.allowed_metadata_items(self.context,
too_many_items)
self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
- db.quota_update(self.context, self.project.id, {'metadata_items': 5})
+ db.quota_create(self.context, self.project.id, 'metadata_items', 5)
num_metadata_items = quota.allowed_metadata_items(self.context,
too_many_items)
self.assertEqual(num_metadata_items, 5)
# Cleanup
- db.quota_destroy(self.context, self.project.id)
+ db.quota_destroy_all_by_project(self.context, self.project.id)
+
+ def test_unlimited_instances(self):
+ FLAGS.quota_instances = 2
+ FLAGS.quota_cores = 1000
+ instance_type = self._get_instance_type('m1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 2)
+ db.quota_create(self.context, self.project.id, 'instances', None)
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 100)
+ num_instances = quota.allowed_instances(self.context, 101,
+ instance_type)
+ self.assertEqual(num_instances, 101)
+
+ def test_unlimited_cores(self):
+ FLAGS.quota_instances = 1000
+ FLAGS.quota_cores = 2
+ instance_type = self._get_instance_type('m1.small')
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 2)
+ db.quota_create(self.context, self.project.id, 'cores', None)
+ num_instances = quota.allowed_instances(self.context, 100,
+ instance_type)
+ self.assertEqual(num_instances, 100)
+ num_instances = quota.allowed_instances(self.context, 101,
+ instance_type)
+ self.assertEqual(num_instances, 101)
+
+ def test_unlimited_volumes(self):
+ FLAGS.quota_volumes = 10
+ FLAGS.quota_gigabytes = 1000
+ volumes = quota.allowed_volumes(self.context, 100, 1)
+ self.assertEqual(volumes, 10)
+ db.quota_create(self.context, self.project.id, 'volumes', None)
+ volumes = quota.allowed_volumes(self.context, 100, 1)
+ self.assertEqual(volumes, 100)
+ volumes = quota.allowed_volumes(self.context, 101, 1)
+ self.assertEqual(volumes, 101)
+
+ def test_unlimited_gigabytes(self):
+ FLAGS.quota_volumes = 1000
+ FLAGS.quota_gigabytes = 10
+ volumes = quota.allowed_volumes(self.context, 100, 1)
+ self.assertEqual(volumes, 10)
+ db.quota_create(self.context, self.project.id, 'gigabytes', None)
+ volumes = quota.allowed_volumes(self.context, 100, 1)
+ self.assertEqual(volumes, 100)
+ volumes = quota.allowed_volumes(self.context, 101, 1)
+ self.assertEqual(volumes, 101)
+
+ def test_unlimited_floating_ips(self):
+ FLAGS.quota_floating_ips = 10
+ floating_ips = quota.allowed_floating_ips(self.context, 100)
+ self.assertEqual(floating_ips, 10)
+ db.quota_create(self.context, self.project.id, 'floating_ips', None)
+ floating_ips = quota.allowed_floating_ips(self.context, 100)
+ self.assertEqual(floating_ips, 100)
+ floating_ips = quota.allowed_floating_ips(self.context, 101)
+ self.assertEqual(floating_ips, 101)
+
+ def test_unlimited_metadata_items(self):
+ FLAGS.quota_metadata_items = 10
+ items = quota.allowed_metadata_items(self.context, 100)
+ self.assertEqual(items, 10)
+ db.quota_create(self.context, self.project.id, 'metadata_items', None)
+ items = quota.allowed_metadata_items(self.context, 100)
+ self.assertEqual(items, 100)
+ items = quota.allowed_metadata_items(self.context, 101)
+ self.assertEqual(items, 101)
def test_too_many_instances(self):
instance_ids = []
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 51d987288..968ef9d6c 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -120,12 +120,11 @@ class SchedulerTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
- try:
- scheduler.show_host_resources(ctxt, dest)
- except exception.NotFound, e:
- c1 = (e.message.find(_("does not exist or is not a "
- "compute node.")) >= 0)
- self.assertTrue(c1)
+ self.assertRaises(exception.NotFound, scheduler.show_host_resources,
+ ctxt, dest)
+ #TODO(bcwaldon): reimplement this functionality
+ #c1 = (e.message.find(_("does not exist or is not a "
+ # "compute node.")) >= 0)
def _dic_is_equal(self, dic1, dic2, keys=None):
"""Compares 2 dictionary contents(Helper method)"""
@@ -698,14 +697,10 @@ class SimpleDriverTestCase(test.TestCase):
'topic': 'volume', 'report_count': 0}
s_ref = db.service_create(self.context, dic)
- try:
- self.scheduler.driver.schedule_live_migration(self.context,
- instance_id,
- i_ref['host'])
- except exception.Invalid, e:
- c = (e.message.find('volume node is not alive') >= 0)
+ self.assertRaises(exception.VolumeServiceUnavailable,
+ self.scheduler.driver.schedule_live_migration,
+ self.context, instance_id, i_ref['host'])
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.volume_destroy(self.context, v_ref['id'])
@@ -718,13 +713,10 @@ class SimpleDriverTestCase(test.TestCase):
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])
- try:
- self.scheduler.driver._live_migration_src_check(self.context,
- i_ref)
- except exception.Invalid, e:
- c = (e.message.find('is not alive') >= 0)
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.scheduler.driver._live_migration_src_check,
+ self.context, i_ref)
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -749,14 +741,10 @@ class SimpleDriverTestCase(test.TestCase):
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])
- try:
- self.scheduler.driver._live_migration_dest_check(self.context,
- i_ref,
- i_ref['host'])
- except exception.Invalid, e:
- c = (e.message.find('is not alive') >= 0)
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.scheduler.driver._live_migration_dest_check,
+ self.context, i_ref, i_ref['host'])
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -766,14 +754,10 @@ class SimpleDriverTestCase(test.TestCase):
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host=i_ref['host'])
- try:
- self.scheduler.driver._live_migration_dest_check(self.context,
- i_ref,
- i_ref['host'])
- except exception.Invalid, e:
- c = (e.message.find('choose other host') >= 0)
+ self.assertRaises(exception.UnableToMigrateToSelf,
+ self.scheduler.driver._live_migration_dest_check,
+ self.context, i_ref, i_ref['host'])
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -784,14 +768,10 @@ class SimpleDriverTestCase(test.TestCase):
s_ref = self._create_compute_service(host='somewhere',
memory_mb_used=12)
- try:
- self.scheduler.driver._live_migration_dest_check(self.context,
- i_ref,
- 'somewhere')
- except exception.NotEmpty, e:
- c = (e.message.find('Unable to migrate') >= 0)
+ self.assertRaises(exception.MigrationError,
+ self.scheduler.driver._live_migration_dest_check,
+ self.context, i_ref, 'somewhere')
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -837,14 +817,10 @@ class SimpleDriverTestCase(test.TestCase):
"args": {'filename': fpath}})
self.mox.ReplayAll()
- try:
- self.scheduler.driver._live_migration_common_check(self.context,
- i_ref,
- dest)
- except exception.Invalid, e:
- c = (e.message.find('does not exist') >= 0)
+ self.assertRaises(exception.SourceHostUnavailable,
+ self.scheduler.driver._live_migration_common_check,
+ self.context, i_ref, dest)
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -865,14 +841,10 @@ class SimpleDriverTestCase(test.TestCase):
driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
self.mox.ReplayAll()
- try:
- self.scheduler.driver._live_migration_common_check(self.context,
- i_ref,
- dest)
- except exception.Invalid, e:
- c = (e.message.find(_('Different hypervisor type')) >= 0)
+ self.assertRaises(exception.InvalidHypervisorType,
+ self.scheduler.driver._live_migration_common_check,
+ self.context, i_ref, dest)
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
@@ -895,14 +867,10 @@ class SimpleDriverTestCase(test.TestCase):
driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
self.mox.ReplayAll()
- try:
- self.scheduler.driver._live_migration_common_check(self.context,
- i_ref,
- dest)
- except exception.Invalid, e:
- c = (e.message.find(_('Older hypervisor version')) >= 0)
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.scheduler.driver._live_migration_common_check,
+ self.context, i_ref, dest)
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
@@ -968,7 +936,7 @@ class FakeRerouteCompute(api.reroute_compute):
def go_boom(self, context, instance):
- raise exception.InstanceNotFound("boom message", instance)
+ raise exception.InstanceNotFound(instance_id=instance)
def found_instance(self, context, instance):
@@ -1017,11 +985,8 @@ class ZoneRedirectTest(test.TestCase):
def test_routing_flags(self):
FLAGS.enable_zone_routing = False
decorator = FakeRerouteCompute("foo")
- try:
- result = decorator(go_boom)(None, None, 1)
- self.assertFail(_("Should have thrown exception."))
- except exception.InstanceNotFound, e:
- self.assertEquals(e.message, 'boom message')
+ self.assertRaises(exception.InstanceNotFound, decorator(go_boom),
+ None, None, 1)
def test_get_collection_context_and_id(self):
decorator = api.reroute_compute("foo")
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index e08d229b0..8f7e83c3e 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -17,9 +17,9 @@
import os
import tempfile
+from nova import exception
from nova import test
from nova import utils
-from nova import exception
class ExecuteTestCase(test.TestCase):
@@ -250,3 +250,28 @@ class GetFromPathTestCase(test.TestCase):
input = {'a': [1, 2, {'b': 'b_1'}]}
self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEquals(['b_1'], f(input, "a/b"))
+
+
+class GenericUtilsTestCase(test.TestCase):
+ def test_parse_server_string(self):
+ result = utils.parse_server_string('::1')
+ self.assertEqual(('::1', ''), result)
+ result = utils.parse_server_string('[::1]:8773')
+ self.assertEqual(('::1', '8773'), result)
+ result = utils.parse_server_string('2001:db8::192.168.1.1')
+ self.assertEqual(('2001:db8::192.168.1.1', ''), result)
+ result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
+ self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
+ result = utils.parse_server_string('192.168.1.1')
+ self.assertEqual(('192.168.1.1', ''), result)
+ result = utils.parse_server_string('192.168.1.2:8773')
+ self.assertEqual(('192.168.1.2', '8773'), result)
+ result = utils.parse_server_string('192.168.1.3')
+ self.assertEqual(('192.168.1.3', ''), result)
+ result = utils.parse_server_string('www.example.com:8443')
+ self.assertEqual(('www.example.com', '8443'), result)
+ result = utils.parse_server_string('www.example.com')
+ self.assertEqual(('www.example.com', ''), result)
+ # error case
+ result = utils.parse_server_string('www.exa:mple.com:8443')
+ self.assertEqual(('', ''), result)
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 0a0c7a958..1bec9caca 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -31,9 +31,7 @@ from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
-from nova.compute import manager as compute_manager
from nova.compute import power_state
-from nova.db.sqlalchemy import models
from nova.virt import libvirt_conn
libvirt = None
@@ -46,6 +44,27 @@ def _concurrency(wait, done, target):
done.send()
+def _create_network_info(count=1, ipv6=None):
+ if ipv6 is None:
+ ipv6 = FLAGS.use_ipv6
+ fake = 'fake'
+ fake_ip = '0.0.0.0/0'
+ fake_ip_2 = '0.0.0.1/0'
+ fake_ip_3 = '0.0.0.1/0'
+ network = {'gateway': fake,
+ 'gateway_v6': fake,
+ 'bridge': fake,
+ 'cidr': fake_ip,
+ 'cidr_v6': fake_ip}
+ mapping = {'mac': fake,
+ 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
+ if ipv6:
+ mapping['ip6s'] = [{'ip': fake_ip},
+ {'ip': fake_ip_2},
+ {'ip': fake_ip_3}]
+ return [(network, mapping) for x in xrange(0, count)]
+
+
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
@@ -194,6 +213,37 @@ class LibvirtConnTestCase(test.TestCase):
return db.service_create(context.get_admin_context(), service_ref)
+ def test_preparing_xml_info(self):
+ conn = libvirt_conn.LibvirtConnection(True)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ result = conn._prepare_xml_info(instance_ref, False)
+ self.assertFalse(result['nics'])
+
+ result = conn._prepare_xml_info(instance_ref, False,
+ _create_network_info())
+ self.assertTrue(len(result['nics']) == 1)
+
+ result = conn._prepare_xml_info(instance_ref, False,
+ _create_network_info(2))
+ self.assertTrue(len(result['nics']) == 2)
+
+ def test_get_nic_for_xml_v4(self):
+ conn = libvirt_conn.LibvirtConnection(True)
+ network, mapping = _create_network_info()[0]
+ self.flags(use_ipv6=False)
+ params = conn._get_nic_for_xml(network, mapping)['extra_params']
+ self.assertTrue(params.find('PROJNETV6') == -1)
+ self.assertTrue(params.find('PROJMASKV6') == -1)
+
+ def test_get_nic_for_xml_v6(self):
+ conn = libvirt_conn.LibvirtConnection(True)
+ network, mapping = _create_network_info()[0]
+ self.flags(use_ipv6=True)
+ params = conn._get_nic_for_xml(network, mapping)['extra_params']
+ self.assertTrue(params.find('PROJNETV6') > -1)
+ self.assertTrue(params.find('PROJMASKV6') > -1)
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -229,6 +279,22 @@ class LibvirtConnTestCase(test.TestCase):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
+ def test_multi_nic(self):
+ instance_data = dict(self.test_instance)
+ network_info = _create_network_info(2)
+ conn = libvirt_conn.LibvirtConnection(True)
+ instance_ref = db.instance_create(self.context, instance_data)
+ xml = conn.to_xml(instance_ref, False, network_info)
+ tree = xml_to_tree(xml)
+ interfaces = tree.findall("./devices/interface")
+ self.assertEquals(len(interfaces), 2)
+ parameters = interfaces[0].findall('./filterref/parameter')
+ self.assertEquals(interfaces[0].get('type'), 'bridge')
+ self.assertEquals(parameters[0].get('name'), 'IP')
+ self.assertEquals(parameters[0].get('value'), '0.0.0.0/0')
+ self.assertEquals(parameters[1].get('name'), 'DHCPSERVER')
+ self.assertEquals(parameters[1].get('value'), 'fake')
+
def _check_xml_and_container(self, instance):
user_context = context.RequestContext(project=self.project,
user=self.user)
@@ -327,19 +393,13 @@ class LibvirtConnTestCase(test.TestCase):
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
+ parameter = './devices/interface/filterref/parameter'
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
- (lambda t: t.find(
- './devices/interface/filterref/parameter').get('name'), 'IP'),
- (lambda t: t.find(
- './devices/interface/filterref/parameter').get(
- 'value'), '10.11.12.13'),
- (lambda t: t.findall(
- './devices/interface/filterref/parameter')[1].get(
- 'name'), 'DHCPSERVER'),
- (lambda t: t.findall(
- './devices/interface/filterref/parameter')[1].get(
- 'value'), '10.0.0.1'),
+ (lambda t: t.find(parameter).get('name'), 'IP'),
+ (lambda t: t.find(parameter).get('value'), '10.11.12.13'),
+ (lambda t: t.findall(parameter)[1].get('name'), 'DHCPSERVER'),
+ (lambda t: t.findall(parameter)[1].get('value'), '10.0.0.1'),
(lambda t: t.find('./devices/serial/source').get(
'path').split('/')[1], 'console.log'),
(lambda t: t.find('./memory').text, '2097152')]
@@ -451,7 +511,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- self.assertRaises(exception.Invalid,
+ self.assertRaises(exception.ComputeServiceUnavailable,
conn.update_available_resource,
self.context, 'dummy')
@@ -549,6 +609,48 @@ class LibvirtConnTestCase(test.TestCase):
db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['id'])
+ def test_spawn_with_network_info(self):
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing mocks
+ def fake_none(self, instance):
+ return
+
+ self.create_fake_libvirt_mock()
+ instance = db.instance_create(self.context, self.test_instance)
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
+ conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
+
+ network = db.project_get_network(context.get_admin_context(),
+ self.project.id)
+ ip_dict = {'ip': self.test_ip,
+ 'netmask': network['netmask'],
+ 'enabled': '1'}
+ mapping = {'label': network['label'],
+ 'gateway': network['gateway'],
+ 'mac': instance['mac_address'],
+ 'dns': [network['dns']],
+ 'ips': [ip_dict]}
+ network_info = [(network, mapping)]
+
+ try:
+ conn.spawn(instance, network_info)
+ except Exception, e:
+ count = (0 <= str(e.message).find('Unexpected method call'))
+
+ self.assertTrue(count)
+
+ def test_get_host_ip_addr(self):
+ conn = libvirt_conn.LibvirtConnection(False)
+ ip = conn.get_host_ip_addr()
+ self.assertEquals(ip, FLAGS.my_ip)
+
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
@@ -614,12 +716,15 @@ class IptablesFirewallTestCase(test.TestCase):
'# Completed on Tue Jan 18 23:47:56 2011',
]
+ def _create_instance_ref(self):
+ return db.instance_create(self.context,
+ {'user_id': 'fake',
+ 'project_id': 'fake',
+ 'mac_address': '56:12:12:12:12:12',
+ 'instance_type_id': 1})
+
def test_static_filters(self):
- instance_ref = db.instance_create(self.context,
- {'user_id': 'fake',
- 'project_id': 'fake',
- 'mac_address': '56:12:12:12:12:12',
- 'instance_type_id': 1})
+ instance_ref = self._create_instance_ref()
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context,
@@ -730,6 +835,50 @@ class IptablesFirewallTestCase(test.TestCase):
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])
+ def test_filters_for_instance_with_ip_v6(self):
+ self.flags(use_ipv6=True)
+ network_info = _create_network_info()
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEquals(len(rulesv4), 2)
+ self.assertEquals(len(rulesv6), 3)
+
+ def test_filters_for_instance_without_ip_v6(self):
+ self.flags(use_ipv6=False)
+ network_info = _create_network_info()
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEquals(len(rulesv4), 2)
+ self.assertEquals(len(rulesv6), 0)
+
+ def test_multinic_iptables(self):
+ ipv4_rules_per_network = 2
+ ipv6_rules_per_network = 3
+ networks_count = 5
+ instance_ref = self._create_instance_ref()
+ network_info = _create_network_info(networks_count)
+ ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
+ ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
+ inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
+ network_info)
+ self.fw.add_filters_for_instance(instance_ref, network_info)
+ ipv4 = self.fw.iptables.ipv4['filter'].rules
+ ipv6 = self.fw.iptables.ipv6['filter'].rules
+ ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
+ ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
+ self.assertEquals(ipv4_network_rules,
+ ipv4_rules_per_network * networks_count)
+ self.assertEquals(ipv6_network_rules,
+ ipv6_rules_per_network * networks_count)
+
+ def test_do_refresh_security_group_rules(self):
+ instance_ref = self._create_instance_ref()
+ self.mox.StubOutWithMock(self.fw,
+ 'add_filters_for_instance',
+ use_mock_anything=True)
+ self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg())
+ self.fw.instances[instance_ref['id']] = instance_ref
+ self.mox.ReplayAll()
+ self.fw.do_refresh_security_group_rules("fake")
+
class NWFilterTestCase(test.TestCase):
def setUp(self):
@@ -811,6 +960,28 @@ class NWFilterTestCase(test.TestCase):
return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
+ def _create_instance(self):
+ return db.instance_create(self.context,
+ {'user_id': 'fake',
+ 'project_id': 'fake',
+ 'mac_address': '00:A0:C9:14:C8:29',
+ 'instance_type_id': 1})
+
+ def _create_instance_type(self, params={}):
+ """Create a test instance"""
+ context = self.context.elevated()
+ inst = {}
+ inst['name'] = 'm1.small'
+ inst['memory_mb'] = '1024'
+ inst['vcpus'] = '1'
+ inst['local_gb'] = '20'
+ inst['flavorid'] = '1'
+ inst['swap'] = '2048'
+ inst['rxtx_quota'] = 100
+ inst['rxtx_cap'] = 200
+ inst.update(params)
+ return db.instance_type_create(context, inst)['id']
+
def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
@@ -839,25 +1010,18 @@ class NWFilterTestCase(test.TestCase):
self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
- instance_ref = db.instance_create(self.context,
- {'user_id': 'fake',
- 'project_id': 'fake',
- 'mac_address': '00:A0:C9:14:C8:29',
- 'instance_type_id': 1})
+ instance_ref = self._create_instance()
inst_id = instance_ref['id']
ip = '10.11.12.13'
- network_ref = db.project_get_network(self.context,
- 'fake')
-
- fixed_ip = {'address': ip,
- 'network_id': network_ref['id']}
+ network_ref = db.project_get_network(self.context, 'fake')
+ fixed_ip = {'address': ip, 'network_id': network_ref['id']}
admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
- 'instance_id': instance_ref['id']})
+ 'instance_id': inst_id})
def _ensure_all_called():
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
@@ -883,3 +1047,11 @@ class NWFilterTestCase(test.TestCase):
_ensure_all_called()
self.teardown_security_group()
db.instance_destroy(admin_ctxt, instance_ref['id'])
+
+ def test_create_network_filters(self):
+ instance_ref = self._create_instance()
+ network_info = _create_network_info(3)
+ result = self.fw._create_network_filters(instance_ref,
+ network_info,
+ "fake")
+ self.assertEquals(len(result), 3)
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index e9d8289aa..236d12434 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -142,7 +142,7 @@ class VolumeTestCase(test.TestCase):
self.assertEqual(vol['status'], "available")
self.volume.delete_volume(self.context, volume_id)
- self.assertRaises(exception.Error,
+ self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 375480a2e..be1e35697 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -16,7 +16,9 @@
"""Test suite for XenAPI."""
+import eventlet
import functools
+import json
import os
import re
import stubout
@@ -197,6 +199,28 @@ class XenAPIVMTestCase(test.TestCase):
self.context = context.RequestContext('fake', 'fake', False)
self.conn = xenapi_conn.get_connection(False)
+ def test_parallel_builds(self):
+ stubs.stubout_loopingcall_delay(self.stubs)
+
+ def _do_build(id, proj, user, *args):
+ values = {
+ 'id': id,
+ 'project_id': proj,
+ 'user_id': user,
+ 'image_id': 1,
+ 'kernel_id': 2,
+ 'ramdisk_id': 3,
+ 'instance_type_id': '3', # m1.large
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'}
+ instance = db.instance_create(self.context, values)
+ self.conn.spawn(instance)
+
+ gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id)
+ gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id)
+ gt1.wait()
+ gt2.wait()
+
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
@@ -665,3 +689,52 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
+
+
+class FakeXenApi(object):
+ """Fake XenApi for testing HostState."""
+
+ class FakeSR(object):
+ def get_record(self, ref):
+ return {'virtual_allocation': 10000,
+ 'physical_utilisation': 20000}
+
+ SR = FakeSR()
+
+
+class FakeSession(object):
+ """Fake Session class for HostState testing."""
+
+ def async_call_plugin(self, *args):
+ return None
+
+ def wait_for_task(self, *args):
+ vm = {'total': 10,
+ 'overhead': 20,
+ 'free': 30,
+ 'free-computed': 40}
+ return json.dumps({'host_memory': vm})
+
+ def get_xenapi(self):
+ return FakeXenApi()
+
+
+class HostStateTestCase(test.TestCase):
+ """Tests HostState, which holds metrics from XenServer that get
+ reported back to the Schedulers."""
+
+ def _fake_safe_find_sr(self, session):
+ """None SR ref since we're ignoring it in FakeSR."""
+ return None
+
+ def test_host_state(self):
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(vm_utils, 'safe_find_sr', self._fake_safe_find_sr)
+ host_state = xenapi_conn.HostState(FakeSession())
+ stats = host_state._stats
+ self.assertEquals(stats['disk_total'], 10000)
+ self.assertEquals(stats['disk_used'], 20000)
+ self.assertEquals(stats['host_memory_total'], 10)
+ self.assertEquals(stats['host_memory_overhead'], 20)
+ self.assertEquals(stats['host_memory_free'], 30)
+ self.assertEquals(stats['host_memory_free_computed'], 40)
diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py
index 688dc704d..e132809dc 100644
--- a/nova/tests/test_zones.py
+++ b/nova/tests/test_zones.py
@@ -78,38 +78,32 @@ class ZoneManagerTestCase(test.TestCase):
def test_service_capabilities(self):
zm = zone_manager.ZoneManager()
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, {})
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3)))
zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30)))
zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc10_a=(99, 99), svc10_b=(99, 99)))
zm.update_service_capabilities("svc1", "host3", dict(c=5))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc1_c=(5, 5), svc10_a=(99, 99),
svc10_b=(99, 99)))
- caps = zm.get_zone_capabilities(self, 'svc1')
- self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
- svc1_c=(5, 5)))
- caps = zm.get_zone_capabilities(self, 'svc10')
- self.assertEquals(caps, dict(svc10_a=(99, 99), svc10_b=(99, 99)))
-
def test_refresh_from_db_replace_existing(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 205f6c902..4833ccb07 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -16,6 +16,7 @@
"""Stubouts, mocks and fixtures for the test suite"""
+import eventlet
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
@@ -28,29 +29,6 @@ def stubout_instance_snapshot(stubs):
@classmethod
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
- # Stubout wait_for_task
- def fake_wait_for_task(self, task, id):
- class FakeEvent:
-
- def send(self, value):
- self.rv = value
-
- def wait(self):
- return self.rv
-
- done = FakeEvent()
- self._poll_task(id, task, done)
- rv = done.wait()
- return rv
-
- def fake_loop(self):
- pass
-
- stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task',
- fake_wait_for_task)
-
- stubs.Set(xenapi_conn.XenAPISession, '_stop_loop', fake_loop)
-
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
@@ -63,11 +41,6 @@ def stubout_instance_snapshot(stubs):
stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
- def fake_parse_xmlrpc_value(val):
- return val
-
- stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value)
-
def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
from nova.virt.xenapi.fake import create_vdi
@@ -144,6 +117,16 @@ def stubout_loopingcall_start(stubs):
stubs.Set(utils.LoopingCall, 'start', fake_start)
+def stubout_loopingcall_delay(stubs):
+ def fake_start(self, interval, now=True):
+ self._running = True
+ eventlet.sleep(1)
+ self.f(*self.args, **self.kw)
+ # This would fail before parallel xenapi calls were fixed
+ assert self._running == False
+ stubs.Set(utils.LoopingCall, 'start', fake_start)
+
+
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
diff --git a/nova/utils.py b/nova/utils.py
index efd0dc348..105d73c7c 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -60,7 +60,7 @@ def import_class(import_str):
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), exc:
LOG.debug(_('Inner Exception: %s'), exc)
- raise exception.NotFound(_('Class %s cannot be found') % class_str)
+ raise exception.ClassNotFound(class_name=class_str)
def import_object(import_str):
@@ -232,9 +232,12 @@ def default_flagfile(filename='nova.conf'):
# turn relative filename into an absolute path
script_dir = os.path.dirname(inspect.stack()[-1][1])
filename = os.path.abspath(os.path.join(script_dir, filename))
- if os.path.exists(filename):
- flagfile = ['--flagfile=%s' % filename]
- sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
+ if not os.path.exists(filename):
+ filename = "./nova.conf"
+ if not os.path.exists(filename):
+ filename = '/etc/nova/nova.conf'
+ flagfile = ['--flagfile=%s' % filename]
+ sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
def debug(arg):
@@ -295,26 +298,6 @@ def get_my_linklocal(interface):
" :%(ex)s") % locals())
-def to_global_ipv6(prefix, mac):
- try:
- mac64 = netaddr.EUI(mac).eui64().words
- int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
- mac64_addr = netaddr.IPAddress(int_addr)
- maskIP = netaddr.IPNetwork(prefix).ip
- return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
- format()
- except TypeError:
- raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac)
-
-
-def to_mac(ipv6_address):
- address = netaddr.IPAddress(ipv6_address)
- mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff')
- mask2 = netaddr.IPAddress('::0200:0:0:0')
- mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words
- return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]])
-
-
def utcnow():
"""Overridable version of datetime.datetime.utcnow."""
if utcnow.override_time:
@@ -451,6 +434,8 @@ class LoopingCall(object):
try:
while self._running:
self.f(*self.args, **self.kw)
+ if not self._running:
+ break
greenthread.sleep(interval)
except LoopingCallDone, e:
self.stop()
@@ -701,3 +686,33 @@ def check_isinstance(obj, cls):
raise Exception(_('Expected object of type: %s') % (str(cls)))
# TODO(justinsb): Can we make this better??
return cls() # Ugly PyLint hack
+
+
+def parse_server_string(server_str):
+ """
+    Parses the given server_string and returns a tuple of host and port.
+    If it's not a combination of host part and port, the port element
+    is an empty string. If the input is an invalid expression, a tuple
+    of empty strings is returned.
+ """
+ try:
+ # First of all, exclude pure IPv6 address (w/o port).
+ if netaddr.valid_ipv6(server_str):
+ return (server_str, '')
+
+ # Next, check if this is IPv6 address with a port number combination.
+ if server_str.find("]:") != -1:
+ (address, port) = server_str.replace('[', '', 1).split(']:')
+ return (address, port)
+
+ # Third, check if this is a combination of an address and a port
+ if server_str.find(':') == -1:
+ return (server_str, '')
+
+ # This must be a combination of an address and a port
+ (address, port) = server_str.split(':')
+ return (address, port)
+
+ except:
+ LOG.debug(_('Invalid server_string: %s' % server_str))
+ return ('', '')
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index c3d5230df..5ac376e46 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -288,8 +288,7 @@ class FakeConnection(driver.ComputeDriver):
knowledge of the instance
"""
if instance_name not in self.instances:
- raise exception.NotFound(_("Instance %s Not Found")
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
i = self.instances[instance_name]
return {'state': i.state,
'max_mem': 0,
@@ -368,7 +367,7 @@ class FakeConnection(driver.ComputeDriver):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
def get_console_output(self, instance):
- return 'FAKE CONSOLE OUTPUT'
+ return 'FAKE CONSOLE\xffOUTPUT'
def get_ajax_console(self, instance):
return {'token': 'FAKETOKEN',
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 10f3b2f4b..85ebdd0b1 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -143,8 +143,7 @@ class HyperVConnection(driver.ComputeDriver):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:
- raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
- instance.name)
+ raise exception.InstanceExists(name=instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@@ -373,7 +372,7 @@ class HyperVConnection(driver.ComputeDriver):
"""Reboot the specified instance."""
vm = self._lookup(instance.name)
if vm is None:
- raise exception.NotFound('instance not present %s' % instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
self._set_vm_state(instance.name, 'Reboot')
def destroy(self, instance):
@@ -417,7 +416,7 @@ class HyperVConnection(driver.ComputeDriver):
"""Get information about the VM"""
vm = self._lookup(instance_id)
if vm is None:
- raise exception.NotFound('instance not present %s' % instance_id)
+ raise exception.InstanceNotFound(instance_id=instance_id)
vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
@@ -479,14 +478,12 @@ class HyperVConnection(driver.ComputeDriver):
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot attach volume to missing %s vm'
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot detach volume from missing %s '
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
def poll_rescued_instances(self, timeout):
pass
@@ -494,3 +491,11 @@ class HyperVConnection(driver.ComputeDriver):
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return
+
+ def update_host_status(self):
+ """See xenapi_conn.py implementation."""
+ pass
+
+ def get_host_stats(self, refresh=False):
+ """See xenapi_conn.py implementation."""
+ pass
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 5202c838a..fa918b0a3 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -57,6 +57,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import ipv6
from nova import log as logging
from nova import utils
from nova import vnc
@@ -184,8 +185,9 @@ def _get_network_info(instance):
def ip6_dict():
prefix = network['cidr_v6']
mac = instance['mac_address']
+ project_id = instance['project_id']
return {
- 'ip': utils.to_global_ipv6(prefix, mac),
+ 'ip': ipv6.to_global(prefix, mac, project_id),
'netmask': network['netmask_v6'],
'enabled': '1'}
@@ -312,19 +314,10 @@ class LibvirtConnection(driver.ComputeDriver):
def destroy(self, instance, cleanup=True):
instance_name = instance['name']
- # TODO(justinsb): Refactor all lookupByName calls for error-handling
try:
- virt_dom = self._conn.lookupByName(instance_name)
- except libvirt.libvirtError as e:
- errcode = e.get_error_code()
- if errcode == libvirt.VIR_ERR_NO_DOMAIN:
- virt_dom = None
- else:
- LOG.warning(_("Error from libvirt during lookup of "
- "%(instance_name)s. Code=%(errcode)s "
- "Error=%(e)s") %
- locals())
- raise
+ virt_dom = self._lookup_by_name(instance_name)
+ except exception.NotFound:
+ virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
@@ -395,7 +388,7 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def attach_volume(self, instance_name, device_path, mountpoint):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
if device_path.startswith('/dev/'):
xml = """<disk type='block'>
@@ -413,7 +406,7 @@ class LibvirtConnection(driver.ComputeDriver):
name,
mount_device)
else:
- raise exception.Invalid(_("Invalid device path %s") % device_path)
+ raise exception.InvalidDevicePath(path=device_path)
virt_dom.attachDevice(xml)
@@ -439,11 +432,11 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def detach_volume(self, instance_name, mountpoint):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
- raise exception.NotFound(_("No disk at %s") % mount_device)
+ raise exception.DiskNotFound(location=mount_device)
virt_dom.detachDevice(xml)
@exception.wrap_exception
@@ -456,7 +449,7 @@ class LibvirtConnection(driver.ComputeDriver):
"""
image_service = utils.import_object(FLAGS.image_service)
- virt_dom = self._conn.lookupByName(instance['name'])
+ virt_dom = self._lookup_by_name(instance['name'])
elevated = context.get_admin_context()
base = image_service.show(elevated, instance['image_id'])
@@ -522,8 +515,15 @@ class LibvirtConnection(driver.ComputeDriver):
reboot happens, as the guest OS cannot ignore this action.
"""
+ virt_dom = self._conn.lookupByName(instance['name'])
+        # NOTE(itoumsn): Use XML derived from the running instance
+ # instead of using to_xml(instance). This is almost the ultimate
+ # stupid workaround.
+ xml = virt_dom.XMLDesc(0)
+ # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
+ # better because we cannot ensure flushing dirty buffers
+ # in the guest OS. But, in case of KVM, shutdown() does not work...
self.destroy(instance, False)
- xml = self.to_xml(instance)
self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self._create_new_domain(xml)
@@ -623,7 +623,7 @@ class LibvirtConnection(driver.ComputeDriver):
xml = self.to_xml(instance, False, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info)
+ self._create_image(instance, xml, network_info=network_info)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@@ -715,7 +715,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise Exception(_('Unable to find an open port'))
def get_pty_for_instance(instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = minidom.parseString(xml)
@@ -737,10 +737,13 @@ class LibvirtConnection(driver.ComputeDriver):
subprocess.Popen(cmd, shell=True)
return {'token': token, 'host': host, 'port': port}
+ def get_host_ip_addr(self):
+ return FLAGS.my_ip
+
@exception.wrap_exception
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO: use etree instead of minidom
dom = minidom.parseString(xml)
@@ -962,26 +965,16 @@ class LibvirtConnection(driver.ComputeDriver):
mac_id = mapping['mac'].replace(':', '')
if FLAGS.allow_project_net_traffic:
+ template = "<parameter name=\"%s\"value=\"%s\" />\n"
+ net, mask = _get_net_and_mask(network['cidr'])
+ values = [("PROJNET", net), ("PROJMASK", mask)]
if FLAGS.use_ipv6:
- net, mask = _get_net_and_mask(network['cidr'])
net_v6, prefixlen_v6 = _get_net_and_prefixlen(
network['cidr_v6'])
- extra_params = ("<parameter name=\"PROJNET\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJNETV6\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASKV6\" "
- "value=\"%s\" />\n") % \
- (net, mask, net_v6, prefixlen_v6)
- else:
- net, mask = _get_net_and_mask(network['cidr'])
- extra_params = ("<parameter name=\"PROJNET\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" "
- "value=\"%s\" />\n") % \
- (net, mask)
+ values.extend([("PROJNETV6", net_v6),
+ ("PROJMASKV6", prefixlen_v6)])
+
+ extra_params = "".join([template % value for value in values])
else:
extra_params = "\n"
@@ -999,10 +992,7 @@ class LibvirtConnection(driver.ComputeDriver):
return result
- def to_xml(self, instance, rescue=False, network_info=None):
- # TODO(termie): cache?
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
-
+ def _prepare_xml_info(self, instance, rescue=False, network_info=None):
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
@@ -1010,8 +1000,7 @@ class LibvirtConnection(driver.ComputeDriver):
nics = []
for (network, mapping) in network_info:
- nics.append(self._get_nic_for_xml(network,
- mapping))
+ nics.append(self._get_nic_for_xml(network, mapping))
# FIXME(vish): stick this in db
inst_type_id = instance['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
@@ -1043,32 +1032,43 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
xml_info['disk'] = xml_info['basepath'] + "/disk"
+ return xml_info
+ def to_xml(self, instance, rescue=False, network_info=None):
+ # TODO(termie): cache?
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+ xml_info = self._prepare_xml_info(instance, rescue, network_info)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
- LOG.debug(_('instance %s: finished toXML method'),
- instance['name'])
+ LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml
- def get_info(self, instance_name):
- """Retrieve information from libvirt for a specific instance name.
+ def _lookup_by_name(self, instance_name):
+ """Retrieve libvirt domain object given an instance name.
- If a libvirt error is encountered during lookup, we might raise a
- NotFound exception or Error exception depending on how severe the
- libvirt error is.
+        All libvirt errors should be handled in this method and
+ relevant nova exceptions should be raised in response.
"""
try:
- virt_dom = self._conn.lookupByName(instance_name)
+ return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
- msg = _("Instance %s not found") % instance_name
- raise exception.NotFound(msg)
+ raise exception.InstanceNotFound(instance_id=instance_name)
msg = _("Error from libvirt while looking up %(instance_name)s: "
"[Error Code %(error_code)s] %(ex)s") % locals()
raise exception.Error(msg)
+ def get_info(self, instance_name):
+ """Retrieve information from libvirt for a specific instance name.
+
+ If a libvirt error is encountered during lookup, we might raise a
+ NotFound exception or Error exception depending on how severe the
+ libvirt error is.
+
+ """
+ virt_dom = self._lookup_by_name(instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
@@ -1105,7 +1105,7 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all block devices for this domain.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -1147,7 +1147,7 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all network interfaces for this instance.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -1313,9 +1313,9 @@ class LibvirtConnection(driver.ComputeDriver):
xml = libxml2.parseDoc(xml)
nodes = xml.xpathEval('//host/cpu')
if len(nodes) != 1:
- raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
- "but %d\n") % len(nodes)
- + xml.serialize())
+ reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
+ reason += xml.serialize()
+ raise exception.InvalidCPUInfo(reason=reason)
cpu_info = dict()
@@ -1344,9 +1344,8 @@ class LibvirtConnection(driver.ComputeDriver):
tkeys = topology.keys()
if set(tkeys) != set(keys):
ks = ', '.join(keys)
- raise exception.Invalid(_("Invalid xml: topology"
- "(%(topology)s) must have "
- "%(ks)s") % locals())
+ reason = _("topology (%(topology)s) must have %(ks)s")
+ raise exception.InvalidCPUInfo(reason=reason % locals())
feature_nodes = xml.xpathEval('//host/cpu/feature')
features = list()
@@ -1362,7 +1361,7 @@ class LibvirtConnection(driver.ComputeDriver):
Note that this function takes an instance name, not an Instance, so
that it can be called by monitor.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
@@ -1370,7 +1369,7 @@ class LibvirtConnection(driver.ComputeDriver):
Note that this function takes an instance name, not an Instance, so
that it can be called by monitor.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
@@ -1401,9 +1400,7 @@ class LibvirtConnection(driver.ComputeDriver):
try:
service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
except exception.NotFound:
- raise exception.Invalid(_("Cannot update compute manager "
- "specific info, because no service "
- "record was found."))
+ raise exception.ComputeServiceUnavailable(host=host)
# Updating host information
dic = {'vcpus': self.get_vcpu_total(),
@@ -1456,7 +1453,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise
if ret <= 0:
- raise exception.Invalid(m % locals())
+ raise exception.InvalidCPUInfo(reason=m % locals())
return
@@ -1566,7 +1563,7 @@ class LibvirtConnection(driver.ComputeDriver):
FLAGS.live_migration_bandwidth)
except Exception:
- recover_method(ctxt, instance_ref)
+ recover_method(ctxt, instance_ref, dest=dest)
raise
# Waiting for completion of live_migration.
@@ -1587,6 +1584,14 @@ class LibvirtConnection(driver.ComputeDriver):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance_ref)
+ def update_host_status(self):
+ """See xenapi_conn.py implementation."""
+ pass
+
+ def get_host_stats(self, refresh=False):
+ """See xenapi_conn.py implementation."""
+ pass
+
class FirewallDriver(object):
def prepare_instance_filter(self, instance, network_info=None):
@@ -1609,7 +1614,9 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
- def refresh_security_group_rules(self, security_group_id):
+ def refresh_security_group_rules(self,
+ security_group_id,
+ network_info=None):
"""Refresh security group rules from data store
Gets called when a rule has been added to or removed from
@@ -1742,11 +1749,16 @@ class NWFilterFirewall(FirewallDriver):
logging.info('ensuring static filters')
self._ensure_static_filters()
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
self._define_filter(self._filter_container(instance_filter_name,
- ['nova-base']))
+ [base_filter]))
def _ensure_static_filters(self):
if self.static_filters_configured:
@@ -1757,11 +1769,12 @@ class NWFilterFirewall(FirewallDriver):
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']))
+ self._define_filter(self._filter_container('nova-vpn',
+ ['allow-dhcp-server']))
self._define_filter(self.nova_base_ipv4_filter)
self._define_filter(self.nova_base_ipv6_filter)
self._define_filter(self.nova_dhcp_filter)
self._define_filter(self.nova_ra_filter)
- self._define_filter(self.nova_vpn_filter)
if FLAGS.allow_project_net_traffic:
self._define_filter(self.nova_project_filter)
if FLAGS.use_ipv6:
@@ -1775,14 +1788,6 @@ class NWFilterFirewall(FirewallDriver):
''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
return xml
- nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
- <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
- <filterref filter='allow-dhcp-server'/>
- <filterref filter='nova-allow-dhcp-server'/>
- <filterref filter='nova-base-ipv4'/>
- <filterref filter='nova-base-ipv6'/>
- </filter>'''
-
def nova_base_ipv4_filter(self):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
@@ -1845,10 +1850,6 @@ class NWFilterFirewall(FirewallDriver):
"""
if not network_info:
network_info = _get_network_info(instance)
- if instance['image_id'] == FLAGS.vpn_image_id:
- base_filter = 'nova-vpn'
- else:
- base_filter = 'nova-base'
ctxt = context.get_admin_context()
@@ -1860,43 +1861,63 @@ class NWFilterFirewall(FirewallDriver):
'nova-base-ipv6',
'nova-allow-dhcp-server']
+ if FLAGS.use_ipv6:
+ networks = [network for (network, _m) in network_info if
+ network['gateway_v6']]
+
+ if networks:
+ instance_secgroup_filter_children.\
+ append('nova-allow-ra-server')
+
for security_group in \
db.security_group_get_by_instance(ctxt, instance['id']):
self.refresh_security_group_rules(security_group['id'])
- instance_secgroup_filter_children += [('nova-secgroup-%s' %
- security_group['id'])]
+ instance_secgroup_filter_children.append('nova-secgroup-%s' %
+ security_group['id'])
self._define_filter(
self._filter_container(instance_secgroup_filter_name,
instance_secgroup_filter_children))
- for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- instance_filter_children = \
- [base_filter, instance_secgroup_filter_name]
+ network_filters = self.\
+ _create_network_filters(instance, network_info,
+ instance_secgroup_filter_name)
- if FLAGS.use_ipv6:
- gateway_v6 = network['gateway_v6']
+ for (name, children) in network_filters:
+ self._define_filters(name, children)
- if gateway_v6:
- instance_secgroup_filter_children += \
- ['nova-allow-ra-server']
+ def _create_network_filters(self, instance, network_info,
+ instance_secgroup_filter_name):
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
+ result = []
+ for (_n, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ instance_filter_children = [base_filter,
+ instance_secgroup_filter_name]
if FLAGS.allow_project_net_traffic:
- instance_filter_children += ['nova-project']
+ instance_filter_children.append('nova-project')
if FLAGS.use_ipv6:
- instance_filter_children += ['nova-project-v6']
+ instance_filter_children.append('nova-project-v6')
- self._define_filter(
- self._filter_container(instance_filter_name,
- instance_filter_children))
+ result.append((instance_filter_name, instance_filter_children))
- return
+ return result
- def refresh_security_group_rules(self, security_group_id):
+ def _define_filters(self, filter_name, filter_children):
+ self._define_filter(self._filter_container(filter_name,
+ filter_children))
+
+ def refresh_security_group_rules(self,
+ security_group_id,
+ network_info=None):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
@@ -1996,34 +2017,23 @@ class IptablesFirewallDriver(FirewallDriver):
self.add_filters_for_instance(instance, network_info)
self.iptables.apply()
- def add_filters_for_instance(self, instance, network_info=None):
- if not network_info:
- network_info = _get_network_info(instance)
- chain_name = self._instance_chain_name(instance)
-
- self.iptables.ipv4['filter'].add_chain(chain_name)
-
- ips_v4 = [ip['ip'] for (_, mapping) in network_info
- for ip in mapping['ips']]
+ def _create_filter(self, ips, chain_name):
+ return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
- for ipv4_address in ips_v4:
- self.iptables.ipv4['filter'].add_rule('local',
- '-d %s -j $%s' %
- (ipv4_address, chain_name))
+ def _filters_for_instance(self, chain_name, network_info):
+ ips_v4 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ips']]
+ ipv4_rules = self._create_filter(ips_v4, chain_name)
+ ipv6_rules = []
if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].add_chain(chain_name)
- ips_v6 = [ip['ip'] for (_, mapping) in network_info
- for ip in mapping['ip6s']]
+ ips_v6 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ip6s']]
+ ipv6_rules = self._create_filter(ips_v6, chain_name)
- for ipv6_address in ips_v6:
- self.iptables.ipv6['filter'].add_rule('local',
- '-d %s -j $%s' %
- (ipv6_address,
- chain_name))
-
- ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ return ipv4_rules, ipv6_rules
+ def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
@@ -2031,6 +2041,17 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+ def add_filters_for_instance(self, instance, network_info=None):
+ chain_name = self._instance_chain_name(instance)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ self.iptables.ipv4['filter'].add_chain(chain_name)
+ ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
+ network_info)
+ self._add_filters('local', ipv4_rules, ipv6_rules)
+ ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ self._add_filters(chain_name, ipv4_rules, ipv6_rules)
+
def remove_filters_for_instance(self, instance):
chain_name = self._instance_chain_name(instance)
@@ -2154,15 +2175,19 @@ class IptablesFirewallDriver(FirewallDriver):
def refresh_security_group_members(self, security_group):
pass
- def refresh_security_group_rules(self, security_group):
- self.do_refresh_security_group_rules(security_group)
+ def refresh_security_group_rules(self, security_group, network_info=None):
+ self.do_refresh_security_group_rules(security_group, network_info)
self.iptables.apply()
@utils.synchronized('iptables', external=True)
- def do_refresh_security_group_rules(self, security_group):
+ def do_refresh_security_group_rules(self,
+ security_group,
+ network_info=None):
for instance in self.instances.values():
self.remove_filters_for_instance(instance)
- self.add_filters_for_instance(instance)
+ if not network_info:
+ network_info = _get_network_info(instance)
+ self.add_filters_for_instance(instance, network_info)
def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,)
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index 4bb467fa9..7370684bd 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -387,12 +387,11 @@ def _add_file(file_path):
def _remove_file(file_path):
"""Removes a file reference from the db."""
if _db_content.get("files") is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
- raise exception.NotFound(_("File- '%s' is not there in the "
- "datastore") % file_path)
+ raise exception.FileNotFound(file_path=file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
@@ -579,7 +578,7 @@ class FakeVim(object):
"""Searches the datastore for a file."""
ds_path = kwargs.get("datastorePath")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
task_mdo = create_task(method, "success")
@@ -591,7 +590,7 @@ class FakeVim(object):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
_db_content["files"].append(ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 55517a007..bd4b85285 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -100,8 +100,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref:
- raise exception.Duplicate(_("Attempted to create a VM with a name"
- " %s, but that already exists on the host") % instance.name)
+ raise exception.InstanceExists(name=instance.name)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -116,8 +115,7 @@ class VMWareVMOps(object):
network_utils.get_network_with_the_name(self._session,
net_name)
if network_ref is None:
- raise exception.NotFound(_("Network with the name '%s' doesn't"
- " exist on the ESX host") % net_name)
+ raise exception.NetworkNotFoundForBridge(bridge=net_name)
_check_if_network_bridge_exists()
@@ -337,8 +335,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -388,8 +385,7 @@ class VMWareVMOps(object):
"VirtualMachine",
"datastore")
if not ds_ref_ret:
- raise exception.NotFound(_("Failed to get the datastore "
- "reference(s) which the VM uses"))
+ raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
ds_browser = vim_util.get_dynamic_property(
self._session._get_vim(),
@@ -480,8 +476,7 @@ class VMWareVMOps(object):
"""Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -501,8 +496,8 @@ class VMWareVMOps(object):
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
- raise exception.Invalid(_("instance - %s not poweredOn. So can't "
- "be rebooted.") % instance.name)
+ reason = _("instance is not powered on")
+ raise exception.InstanceRebootFailure(reason=reason)
# If latest vmware tools are installed in the VM, and that the tools
# are running, then only do a guest reboot. Otherwise do a hard reset.
@@ -605,8 +600,7 @@ class VMWareVMOps(object):
"""Suspend the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -620,8 +614,9 @@ class VMWareVMOps(object):
LOG.debug(_("Suspended the VM %s ") % instance.name)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
- raise exception.Invalid(_("instance - %s is poweredOff and hence "
- " can't be suspended.") % instance.name)
+ reason = _("instance is powered off and can not be suspended.")
+ raise exception.InstanceSuspendFailure(reason=reason)
+
LOG.debug(_("VM %s was already in suspended state. So returning "
"without doing anything") % instance.name)
@@ -629,8 +624,7 @@ class VMWareVMOps(object):
"""Resume the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -643,15 +637,14 @@ class VMWareVMOps(object):
self._wait_with_callback(instance.id, suspend_task, callback)
LOG.debug(_("Resumed the VM %s ") % instance.name)
else:
- raise exception.Invalid(_("instance - %s not in Suspended state "
- "and hence can't be Resumed.") % instance.name)
+ reason = _("instance is not in a suspended state")
+ raise exception.InstanceResumeFailure(reason=reason)
def get_info(self, instance_name):
"""Return data about the VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance_name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
@@ -687,8 +680,7 @@ class VMWareVMOps(object):
"""Return snapshot of console."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
@@ -717,8 +709,7 @@ class VMWareVMOps(object):
admin_context = context.get_admin_context()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
mac_address = None
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 1927500ad..c8f342aa8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -506,9 +506,7 @@ class VMHelper(HelperBase):
try:
return glance_disk_format2nova_type[disk_format]
except KeyError:
- raise exception.NotFound(
- _("Unrecognized disk_format '%(disk_format)s'")
- % locals())
+ raise exception.InvalidDiskFormat(disk_format=disk_format)
def determine_from_instance():
if instance.kernel_id:
@@ -643,8 +641,7 @@ class VMHelper(HelperBase):
if n == 0:
return None
elif n > 1:
- raise exception.Duplicate(_('duplicate name found: %s') %
- name_label)
+ raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
@@ -853,7 +850,7 @@ def safe_find_sr(session):
"""
sr_ref = find_sr(session)
if sr_ref is None:
- raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ raise exception.StorageRepositoryNotFound()
return sr_ref
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 8b6a35f74..13d7d215b 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -28,12 +28,13 @@ import subprocess
import tempfile
import uuid
-from nova import db
from nova import context
-from nova import log as logging
+from nova import db
from nova import exception
-from nova import utils
from nova import flags
+from nova import ipv6
+from nova import log as logging
+from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import power_state
@@ -127,8 +128,7 @@ class VMOps(object):
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is not None:
- raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance_name)
+ raise exception.InstanceExists(name=instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
@@ -211,8 +211,6 @@ class VMOps(object):
def _wait_for_boot():
try:
state = self.get_info(instance_name)['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
@@ -220,11 +218,7 @@ class VMOps(object):
return True
except Exception, exc:
LOG.warn(exc)
- LOG.exception(_('instance %s: failed to boot'),
- instance_name)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
+ LOG.exception(_('Instance %s: failed to boot'), instance_name)
timer.stop()
return False
@@ -260,8 +254,7 @@ class VMOps(object):
instance_name = instance_or_vm.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(
- _('Instance not present %s') % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_obj.id)
return vm_ref
def _acquire_bootlock(self, vm):
@@ -436,11 +429,12 @@ class VMOps(object):
"""
# Need to uniquely identify this request.
- transaction_id = str(uuid.uuid4())
+ key_init_transaction_id = str(uuid.uuid4())
# The simple Diffie-Hellman class is used to manage key exchange.
dh = SimpleDH()
- args = {'id': transaction_id, 'pub': str(dh.get_public())}
- resp = self._make_agent_call('key_init', instance, '', args)
+ key_init_args = {'id': key_init_transaction_id,
+ 'pub': str(dh.get_public())}
+ resp = self._make_agent_call('key_init', instance, '', key_init_args)
if resp is None:
# No response from the agent
return
@@ -454,8 +448,9 @@ class VMOps(object):
dh.compute_shared(agent_pub)
enc_pass = dh.encrypt(new_pass)
# Send the encrypted password
- args['enc_pass'] = enc_pass
- resp = self._make_agent_call('password', instance, '', args)
+ password_transaction_id = str(uuid.uuid4())
+ password_args = {'id': password_transaction_id, 'enc_pass': enc_pass}
+ resp = self._make_agent_call('password', instance, '', password_args)
if resp is None:
# No response from the agent
return
@@ -578,9 +573,8 @@ class VMOps(object):
if not (instance.kernel_id and instance.ramdisk_id):
# 2. We only have kernel xor ramdisk
- raise exception.NotFound(
- _("Instance %(instance_id)s has a kernel or ramdisk but not "
- "both" % locals()))
+ raise exception.InstanceUnacceptable(instance_id=instance_id,
+ reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
@@ -721,8 +715,7 @@ class VMOps(object):
"%s-rescue" % instance.name)
if not rescue_vm_ref:
- raise exception.NotFound(_(
- "Instance is not in Rescue Mode: %s" % instance.name))
+ raise exception.InstanceNotInRescueMode(instance_id=instance.id)
original_vm_ref = VMHelper.lookup(self._session, instance.name)
instance._rescue = False
@@ -816,8 +809,9 @@ class VMOps(object):
def ip6_dict():
return {
- "ip": utils.to_global_ipv6(network['cidr_v6'],
- instance['mac_address']),
+ "ip": ipv6.to_global(network['cidr_v6'],
+ instance['mac_address'],
+ instance['project_id']),
"netmask": network['netmask_v6'],
"enabled": "1"}
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 757ecf5ad..afcb8cf47 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -45,8 +45,7 @@ class VolumeOps(object):
# Before we start, check that the VM exists
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(_('Instance %s not found')
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE: No Resource Pool concept so far
LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
" %(mountpoint)s") % locals())
@@ -98,8 +97,7 @@ class VolumeOps(object):
# Before we start, check that the VM exists
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(_('Instance %s not found')
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index b8a24d5a2..9e48f86b7 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -57,6 +57,8 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
- suffix "_rec" for record objects
"""
+import json
+import random
import sys
import urlparse
import xmlrpclib
@@ -67,10 +69,12 @@ from eventlet import timeout
from nova import context
from nova import db
+from nova import exception
from nova import utils
from nova import flags
from nova import log as logging
from nova.virt import driver
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
@@ -165,9 +169,16 @@ class XenAPIConnection(driver.ComputeDriver):
def __init__(self, url, user, pw):
super(XenAPIConnection, self).__init__()
- session = XenAPISession(url, user, pw)
- self._vmops = VMOps(session)
- self._volumeops = VolumeOps(session)
+ self._session = XenAPISession(url, user, pw)
+ self._vmops = VMOps(self._session)
+ self._volumeops = VolumeOps(self._session)
+ self._host_state = None
+
+ @property
+ def HostState(self):
+ if not self._host_state:
+ self._host_state = HostState(self._session)
+ return self._host_state
def init_host(self, host):
#FIXME(armando): implement this
@@ -315,6 +326,16 @@ class XenAPIConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
+ def update_host_status(self):
+ """Update the status info of the host, and return those values
+ to the calling program."""
+ return self.HostState.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first."""
+ return self.HostState.get_host_stats(refresh=refresh)
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
@@ -326,7 +347,6 @@ class XenAPISession(object):
"(is the Dom0 disk full?)"))
with timeout.Timeout(FLAGS.xenapi_login_timeout, exception):
self._session.login_with_password(user, pw)
- self.loop = None
def get_imported_xenapi(self):
"""Stubout point. This can be replaced with a mock xenapi module."""
@@ -363,57 +383,52 @@ class XenAPISession(object):
def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
- until it completes. Not re-entrant."""
+ until it completes."""
done = event.Event()
- self.loop = utils.LoopingCall(self._poll_task, id, task, done)
- self.loop.start(FLAGS.xenapi_task_poll_interval, now=True)
- rv = done.wait()
- self.loop.stop()
- return rv
-
- def _stop_loop(self):
- """Stop polling for task to finish."""
- #NOTE(sandy-walsh) Had to break this call out to support unit tests.
- if self.loop:
- self.loop.stop()
+ loop = utils.LoopingCall(f=None)
+
+ def _poll_task():
+ """Poll the given XenAPI task, and return the result if the
+ action was completed successfully or not.
+ """
+ try:
+ name = self._session.xenapi.task.get_name_label(task)
+ status = self._session.xenapi.task.get_status(task)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
+ if status == "pending":
+ return
+ elif status == "success":
+ result = self._session.xenapi.task.get_result(task)
+ LOG.info(_("Task [%(name)s] %(task)s status:"
+ " success %(result)s") % locals())
+ done.send(_parse_xmlrpc_value(result))
+ else:
+ error_info = self._session.xenapi.task.get_error_info(task)
+ action["error"] = str(error_info)
+ LOG.warn(_("Task [%(name)s] %(task)s status:"
+ " %(status)s %(error_info)s") % locals())
+ done.send_exception(self.XenAPI.Failure(error_info))
+
+ if id:
+ db.instance_action_create(context.get_admin_context(),
+ action)
+ except self.XenAPI.Failure, exc:
+ LOG.warn(exc)
+ done.send_exception(*sys.exc_info())
+ loop.stop()
+
+ loop.f = _poll_task
+ loop.start(FLAGS.xenapi_task_poll_interval, now=True)
+ return done.wait()
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
return self.XenAPI.Session(url)
- def _poll_task(self, id, task, done):
- """Poll the given XenAPI task, and fire the given action if we
- get a result.
- """
- try:
- name = self._session.xenapi.task.get_name_label(task)
- status = self._session.xenapi.task.get_status(task)
- if id:
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
- if status == "pending":
- return
- elif status == "success":
- result = self._session.xenapi.task.get_result(task)
- LOG.info(_("Task [%(name)s] %(task)s status:"
- " success %(result)s") % locals())
- done.send(_parse_xmlrpc_value(result))
- else:
- error_info = self._session.xenapi.task.get_error_info(task)
- action["error"] = str(error_info)
- LOG.warn(_("Task [%(name)s] %(task)s status:"
- " %(status)s %(error_info)s") % locals())
- done.send_exception(self.XenAPI.Failure(error_info))
-
- if id:
- db.instance_action_create(context.get_admin_context(), action)
- except self.XenAPI.Failure, exc:
- LOG.warn(exc)
- done.send_exception(*sys.exc_info())
- self._stop_loop()
-
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details"""
try:
@@ -436,6 +451,65 @@ class XenAPISession(object):
raise
+class HostState(object):
+ """Manages information about the XenServer host this compute
+ node is running on.
+ """
+ def __init__(self, session):
+ super(HostState, self).__init__()
+ self._session = session
+ self._stats = {}
+ self.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first.
+ """
+ if refresh:
+ self.update_status()
+ return self._stats
+
+ def update_status(self):
+ """Since under Xenserver, a compute node runs on a given host,
+ we can get host status information using xenapi.
+ """
+ LOG.debug(_("Updating host stats"))
+ # Make it something unlikely to match any actual instance ID
+ task_id = random.randint(-80000, -70000)
+ task = self._session.async_call_plugin("xenhost", "host_data", {})
+ task_result = self._session.wait_for_task(task, task_id)
+ if not task_result:
+ task_result = json.dumps("")
+ try:
+ data = json.loads(task_result)
+ except ValueError as e:
+ # Invalid JSON object
+ LOG.error(_("Unable to get updated status: %s") % e)
+ return
+ # Get the SR usage
+ try:
+ sr_ref = vm_utils.safe_find_sr(self._session)
+ except exception.NotFound as e:
+ # No SR configured
+ LOG.error(_("Unable to get SR for this host: %s") % e)
+ return
+ sr_rec = self._session.get_xenapi().SR.get_record(sr_ref)
+ total = int(sr_rec["virtual_allocation"])
+ used = int(sr_rec["physical_utilisation"])
+ data["disk_total"] = total
+ data["disk_used"] = used
+ data["disk_available"] = total - used
+ host_memory = data.get('host_memory', None)
+ if host_memory:
+ data["host_memory_total"] = host_memory.get('total', 0)
+ data["host_memory_overhead"] = host_memory.get('overhead', 0)
+ data["host_memory_free"] = host_memory.get('free', 0)
+ data["host_memory_free_computed"] = \
+ host_memory.get('free-computed', 0)
+ del data['host_memory']
+ self._stats = data
+
+
def _parse_xmlrpc_value(val):
"""Parse the given value as if it were an XML-RPC value. This is
sometimes used as the format for the task.result field."""
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 4b4bb9dc5..09befb647 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -103,3 +103,10 @@ class API(base.Base):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
raise exception.ApiError(_("Volume is already detached"))
+
+ def remove_from_compute(self, context, volume_id, host):
+ """Remove volume from specified compute host."""
+ rpc.call(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "remove_volume",
+ "args": {'volume_id': volume_id}})
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 418087641..e60a8820d 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -102,12 +102,16 @@ class Request(webob.Request):
return bm or 'application/json'
def get_content_type(self):
- try:
- ct = self.headers['Content-Type']
- assert ct in ('application/xml', 'application/json')
- return ct
- except Exception:
- raise webob.exc.HTTPBadRequest('Invalid content type')
+ allowed_types = ("application/xml", "application/json")
+ if not "Content-Type" in self.headers:
+ msg = _("Missing Content-Type")
+ LOG.debug(msg)
+ raise webob.exc.HTTPBadRequest(msg)
+ type = self.content_type
+ if type in allowed_types:
+ return type
+ LOG.debug(_("Wrong Content-Type: %s") % type)
+ raise webob.exc.HTTPBadRequest("Invalid content type")
class Application(object):
@@ -424,7 +428,7 @@ class Serializer(object):
try:
return handlers[content_type]
except Exception:
- raise exception.InvalidContentType()
+ raise exception.InvalidContentType(content_type=content_type)
def serialize(self, data, content_type):
"""Serialize a dictionary into the specified content type."""
@@ -447,8 +451,7 @@ class Serializer(object):
try:
return handlers[content_type]
except Exception:
- raise exception.InvalidContentType(_('Invalid content type %s'
- % content_type))
+ raise exception.InvalidContentType(content_type=content_type)
def _from_json(self, datastring):
return utils.loads(datastring)
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index 5496a6bd5..9e761f264 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
@@ -53,7 +53,6 @@ class TimeoutError(StandardError):
pass
-@jsonify
def key_init(self, arg_dict):
"""Handles the Diffie-Hellman key exchange with the agent to
establish the shared secret key used to encrypt/decrypt sensitive
@@ -72,7 +71,6 @@ def key_init(self, arg_dict):
return resp
-@jsonify
def password(self, arg_dict):
"""Writes a request to xenstore that tells the agent to set
the root password for the given VM. The password should be
@@ -80,7 +78,6 @@ def password(self, arg_dict):
previous call to key_init. The encrypted password value should
be passed as the value for the 'enc_pass' key in arg_dict.
"""
- pub = int(arg_dict["pub"])
enc_pass = arg_dict["enc_pass"]
arg_dict["value"] = json.dumps({"name": "password", "value": enc_pass})
request_id = arg_dict["id"]
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
new file mode 100644
index 000000000..a8428e841
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# XenAPI plugin for reading/writing information to xenstore
+#
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import os
+import random
+import re
+import subprocess
+import tempfile
+import time
+
+import XenAPIPlugin
+
+from pluginlib_nova import *
+configure_logging("xenhost")
+
+host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")
+
+
+def jsonify(fnc):
+ def wrapper(*args, **kwargs):
+ return json.dumps(fnc(*args, **kwargs))
+ return wrapper
+
+
+class TimeoutError(StandardError):
+ pass
+
+
+def _run_command(cmd):
+ """Abstracts out the basics of issuing system commands. If the command
+ returns anything in stderr, a PluginError is raised with that information.
+ Otherwise, the output from stdout is returned.
+ """
+ pipe = subprocess.PIPE
+ proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
+ stderr=pipe, close_fds=True)
+ proc.wait()
+ err = proc.stderr.read()
+ if err:
+ raise pluginlib.PluginError(err)
+ return proc.stdout.read()
+
+
+@jsonify
+def host_data(self, arg_dict):
+ """Runs the commands on the xenstore host to return the current status
+ information.
+ """
+ cmd = "xe host-list | grep uuid"
+ resp = _run_command(cmd)
+ host_uuid = resp.split(":")[-1].strip()
+ cmd = "xe host-param-list uuid=%s" % host_uuid
+ resp = _run_command(cmd)
+ parsed_data = parse_response(resp)
+ # We have the raw dict of values. Extract those that we need,
+ # and convert the data types as needed.
+ ret_dict = cleanup(parsed_data)
+ return ret_dict
+
+
+def parse_response(resp):
+ data = {}
+ for ln in resp.splitlines():
+ if not ln:
+ continue
+ mtch = host_data_pattern.match(ln.strip())
+ try:
+ k, v = mtch.groups()
+ data[k] = v
+ except AttributeError:
+ # Not a valid line; skip it
+ continue
+ return data
+
+
+def cleanup(dct):
+ """Take the raw KV pairs returned and translate them into the
+ appropriate types, discarding any we don't need.
+ """
+ def safe_int(val):
+ """Integer values will either be string versions of numbers,
+ or empty strings. Convert the latter to nulls.
+ """
+ try:
+ return int(val)
+ except ValueError:
+ return None
+
+ def strip_kv(ln):
+ return [val.strip() for val in ln.split(":", 1)]
+
+ out = {}
+
+# sbs = dct.get("supported-bootloaders", "")
+# out["host_supported-bootloaders"] = sbs.split("; ")
+# out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "")
+# out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "")
+# out["host_local-cache-sr"] = dct.get("local-cache-sr", "")
+ out["host_memory"] = omm = {}
+ omm["total"] = safe_int(dct.get("memory-total", ""))
+ omm["overhead"] = safe_int(dct.get("memory-overhead", ""))
+ omm["free"] = safe_int(dct.get("memory-free", ""))
+ omm["free-computed"] = safe_int(
+ dct.get("memory-free-computed", ""))
+
+# out["host_API-version"] = avv = {}
+# avv["vendor"] = dct.get("API-version-vendor", "")
+# avv["major"] = safe_int(dct.get("API-version-major", ""))
+# avv["minor"] = safe_int(dct.get("API-version-minor", ""))
+
+ out["host_uuid"] = dct.get("uuid", None)
+ out["host_name-label"] = dct.get("name-label", "")
+ out["host_name-description"] = dct.get("name-description", "")
+# out["host_host-metrics-live"] = dct.get(
+# "host-metrics-live", "false") == "true"
+ out["host_hostname"] = dct.get("hostname", "")
+ out["host_ip_address"] = dct.get("address", "")
+ oc = dct.get("other-config", "")
+ out["host_other-config"] = ocd = {}
+ if oc:
+ for oc_fld in oc.split("; "):
+ ock, ocv = strip_kv(oc_fld)
+ ocd[ock] = ocv
+# out["host_capabilities"] = dct.get("capabilities", "").split("; ")
+# out["host_allowed-operations"] = dct.get(
+# "allowed-operations", "").split("; ")
+# lsrv = dct.get("license-server", "")
+# out["host_license-server"] = ols = {}
+# if lsrv:
+# for lspart in lsrv.split("; "):
+# lsk, lsv = lspart.split(": ")
+# if lsk == "port":
+# ols[lsk] = safe_int(lsv)
+# else:
+# ols[lsk] = lsv
+# sv = dct.get("software-version", "")
+# out["host_software-version"] = osv = {}
+# if sv:
+# for svln in sv.split("; "):
+# svk, svv = strip_kv(svln)
+# osv[svk] = svv
+ cpuinf = dct.get("cpu_info", "")
+ out["host_cpu_info"] = ocp = {}
+ if cpuinf:
+ for cpln in cpuinf.split("; "):
+ cpk, cpv = strip_kv(cpln)
+ if cpk in ("cpu_count", "family", "model", "stepping"):
+ ocp[cpk] = safe_int(cpv)
+ else:
+ ocp[cpk] = cpv
+# out["host_edition"] = dct.get("edition", "")
+# out["host_external-auth-service-name"] = dct.get(
+# "external-auth-service-name", "")
+ return out
+
+
+if __name__ == "__main__":
+ XenAPIPlugin.dispatch(
+ {"host_data": host_data})
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
index d33c7346b..6c589ed29 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
@@ -59,12 +59,12 @@ def read_record(self, arg_dict):
cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
ret, result = _run_command(cmd)
- return result.rstrip("\n")
+ return result.strip()
except pluginlib.PluginError, e:
if arg_dict.get("ignore_missing_path", False):
cmd = ["xenstore-exists",
"/local/domain/%(dom_id)s/%(path)s" % arg_dict]
- ret, result = _run_command(cmd).strip()
+ ret, result = _run_command(cmd)
# If the path exists, the cmd should return "0"
if ret != 0:
# No such path, so ignore the error and return the
@@ -171,7 +171,7 @@ def _paths_from_ls(recs):
def _run_command(cmd):
"""Abstracts out the basics of issuing system commands. If the command
returns anything in stderr, a PluginError is raised with that information.
- Otherwise, the output from stdout is returned.
+ Otherwise, a tuple of (return code, stdout data) is returned.
"""
pipe = subprocess.PIPE
proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe,
@@ -180,7 +180,7 @@ def _run_command(cmd):
err = proc.stderr.read()
if err:
raise pluginlib.PluginError(err)
- return proc.stdout.read()
+ return (ret, proc.stdout.read())
if __name__ == "__main__":
diff --git a/run_tests.sh b/run_tests.sh
index 610cf1f27..e3a0bd243 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -6,6 +6,7 @@ function usage {
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
echo " -h, --help Print this usage message"
diff --git a/setup.py b/setup.py
index 6c45109bc..c165f40d7 100644
--- a/setup.py
+++ b/setup.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import gettext
import glob
import os
import subprocess
@@ -24,15 +25,20 @@ import sys
from setuptools import find_packages
from setuptools.command.sdist import sdist
+# In order to run the i18n commands for compiling and
+# installing message catalogs, we use DistUtilsExtra.
+# Don't make this a hard requirement, but warn that
+# i18n commands won't be available if DistUtilsExtra is
+# not installed...
try:
- import DistUtilsExtra.auto
+ from DistUtilsExtra.auto import setup
except ImportError:
- print >> sys.stderr, 'To build nova you need '\
- 'https://launchpad.net/python-distutils-extra'
- sys.exit(1)
-assert DistUtilsExtra.auto.__version__ >= '2.18',\
- 'needs DistUtilsExtra.auto >= 2.18'
+ from setuptools import setup
+ print "Warning: DistUtilsExtra required to use i18n builders. "
+ print "To build nova with support for message catalogs, you need "
+ print " https://launchpad.net/python-distutils-extra >= 2.18"
+gettext.install('nova', unicode=1)
from nova.utils import parse_mailmap, str_dict_replace
from nova import version
@@ -100,7 +106,7 @@ def find_data_files(destdir, srcdir):
package_data += [(destdir, files)]
return package_data
-DistUtilsExtra.auto.setup(name='nova',
+setup(name='nova',
version=version.canonical_version_string(),
description='cloud computing fabric controller',
author='OpenStack',
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 30ec85374..8149a3afa 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -1,3 +1,4 @@
+
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -31,11 +32,15 @@ ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.nova-venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
+PY_VERSION = "python" + str(sys.version_info[0]) + '.' + str(sys.version_info[1])
def die(message, *args):
print >>sys.stderr, message % args
sys.exit(1)
+def check_python_version():
+ if sys.version_info < (2, 6):
+ die("Need Python Version >= 2.6")
def run_command(cmd, redirect_output=True, check_exit_code=True):
"""
@@ -100,12 +105,12 @@ def install_dependencies(venv=VENV):
# Tell the virtual env how to "import nova"
- pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth")
+ pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "nova.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
# Patch eventlet (see FAQ # 1485)
patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch')
- patchfile = os.path.join(venv, "lib", "python2.6", "site-packages", "eventlet",
+ patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "eventlet",
"green", "subprocess.py")
patch_cmd = "patch %s %s" % (patchfile, patchsrc)
os.system(patch_cmd)
@@ -134,6 +139,7 @@ def print_help():
def main(argv):
+ check_python_version()
check_dependencies()
create_virtualenv()
install_dependencies()
diff --git a/tools/pip-requires b/tools/pip-requires
index 2f4136732..8f8018765 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -2,7 +2,7 @@ SQLAlchemy==0.6.3
pep8==0.5.0
pylint==0.19
IPy==0.70
-Cheetah==2.4.2.1
+Cheetah==2.4.4
M2Crypto==0.20.2
amqplib==0.6.1
anyjson==0.2.4
@@ -31,3 +31,6 @@ sphinx
glance
nova-adminclient
suds==0.4
+coverage
+nosexcover
+GitPython