From fa4c69330585ead1a1dd58b3bec4cc3f0f92082c Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Sun, 12 Sep 2010 04:44:57 -0700
Subject: export devices unique

---
 nova/db/sqlalchemy/models.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 41013f41b..b6a8c134a 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -25,7 +25,7 @@ import datetime
 
 # TODO(vish): clean up these imports
 from sqlalchemy.orm import relationship, backref, exc, object_mapper
-from sqlalchemy import Column, Integer, String
+from sqlalchemy import Column, Integer, String, schema
 from sqlalchemy import ForeignKey, DateTime, Boolean, Text
 from sqlalchemy.ext.declarative import declarative_base
 
@@ -315,6 +315,7 @@ class Quota(BASE, NovaBase):
 class ExportDevice(BASE, NovaBase):
     """Represents a shelf and blade that a volume can be exported on"""
     __tablename__ = 'export_devices'
+    __table_args__ = (schema.UniqueConstraint("name", "site"), {'mysql_engine': 'InnoDB'})
     id = Column(Integer, primary_key=True)
     shelf_id = Column(Integer)
     blade_id = Column(Integer)
--
cgit

From e77e8a4c368a5c4da1f3e64938bc8940c3603418 Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Sun, 12 Sep 2010 04:48:49 -0700
Subject: fixed name for unique constraint

---
 nova/db/sqlalchemy/models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index b6a8c134a..a6c7d83c0 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -315,7 +315,7 @@ class Quota(BASE, NovaBase):
 class ExportDevice(BASE, NovaBase):
     """Represents a shelf and blade that a volume can be exported on"""
     __tablename__ = 'export_devices'
-    __table_args__ = (schema.UniqueConstraint("name", "site"), {'mysql_engine': 'InnoDB'})
+    __table_args__ = (schema.UniqueConstraint("shelf_id", "blade_id"), {'mysql_engine': 'InnoDB'})
     id = Column(Integer, primary_key=True)
     shelf_id = Column(Integer)
     blade_id = Column(Integer)
--
cgit

From b8516a2239658f0734299049648cbf2828b845eb Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Sun, 12 Sep 2010 04:57:46 -0700
Subject: allow multiple volumes to run ensure_blades without creating duplicates

---
 nova/db/api.py            | 10 +++++++---
 nova/db/sqlalchemy/api.py |  8 +++++---
 nova/volume/manager.py    |  2 +-
 3 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/nova/db/api.py b/nova/db/api.py
index 9f6ff99c3..f78536967 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -413,9 +413,13 @@ def export_device_count(context):
     return IMPL.export_device_count(context)
 
 
-def export_device_create(context, values):
-    """Create an export_device from the values dictionary."""
-    return IMPL.export_device_create(context, values)
+def export_device_create_safe(context, values):
+    """Create an export_device from the values dictionary.
+
+    The device is not returned. 
If the create violates the unique + constraints because the shelf_id and blade_id already exist, + no exception is raised.""" + return IMPL.export_device_create_safe(context, values) ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d612fe669..c96a97951 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -630,12 +630,14 @@ def export_device_count(_context): return models.ExportDevice.count() -def export_device_create(_context, values): +def export_device_create_safe(_context, values): export_device_ref = models.ExportDevice() for (key, value) in values.iteritems(): export_device_ref[key] = value - export_device_ref.save() - return export_device_ref + try: + export_device_ref.save() + except exc.IntegrityError: + pass ################### diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 37b78fdee..7dbd37623 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -62,7 +62,7 @@ class AOEManager(manager.Manager): for shelf_id in xrange(FLAGS.num_shelves): for blade_id in xrange(FLAGS.blades_per_shelf): dev = {'shelf_id': shelf_id, 'blade_id': blade_id} - self.db.export_device_create(context, dev) + self.db.export_device_create_safe(context, dev) @defer.inlineCallbacks def create_volume(self, context, volume_id): -- cgit From 84a9e5a9ea3105513bb5a7ae9b30d49e6eb3bd3e Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 05:31:27 -0700 Subject: Integrity error is in a different exc file --- nova/db/sqlalchemy/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index c96a97951..93c80d27c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -25,6 +25,7 @@ from nova import flags from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import or_ +from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import func @@ -636,7 +637,7 @@ def export_device_create_safe(_context, values): export_device_ref[key] = value try: export_device_ref.save() - except exc.IntegrityError: + except IntegrityError: pass -- cgit From fb66d1577a7c49b013f619c620c30bd4b11586e7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 05:46:13 -0700 Subject: re added missing volume update --- nova/volume/manager.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 7dbd37623..a06070471 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -95,6 +95,9 @@ class AOEManager(manager.Manager): yield self.driver.ensure_exports() now = datetime.datetime.utcnow() + self.db.volume_update(context, + volume_ref['id'], {'status': 'available', + 'launched_at': now}) logging.debug("volume %s: created successfully", volume_id) defer.returnValue(volume_id) -- cgit From 69e30d197dc3c518528bb8d7101c496d753f2122 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 06:05:17 -0700 Subject: deleting is set by cloud --- nova/volume/manager.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index a06070471..8472ff33b 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -105,14 +105,11 @@ class AOEManager(manager.Manager): def delete_volume(self, context, volume_id): """Deletes and unexports volume""" volume_ref = self.db.volume_get(context, volume_id) - if volume_ref['status'] != 
"available": - raise exception.Error("Volume is not available") if volume_ref['attach_status'] == "attached": raise exception.Error("Volume is still attached") if volume_ref['host'] != self.host: raise exception.Error("Volume is not local to this node") logging.debug("Deleting volume with id of: %s", volume_id) - self.db.volume_update(context, volume_id, {'status': 'deleting'}) shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, volume_id) yield self.driver.remove_export(volume_ref['str_id'], -- cgit From 83a6767ab7be871fd269bf409f819033378e4ea9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 06:37:08 -0700 Subject: handle exceptions thrown by vblade stop and vblade destroy --- nova/volume/driver.py | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 4604b85d5..a05e34e51 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -24,6 +24,7 @@ import logging from twisted.internet import defer +from nova import exception from nova import flags from nova import process @@ -81,16 +82,34 @@ class AOEDriver(object): @defer.inlineCallbacks def remove_export(self, _volume_name, shelf_id, blade_id): """Removes an export for a logical volume""" - yield self._execute( - "sudo vblade-persist stop %s %s" % (shelf_id, blade_id)) - yield self._execute( - "sudo vblade-persist destroy %s %s" % (shelf_id, blade_id)) + # NOTE(vish): These commands can partially fail sometimes, but + # running them a second time on failure will usually + # pick up the remaining tasks even though it also + # raises an exception + try: + yield self._execute("sudo vblade-persist stop %s %s" % + (shelf_id, blade_id)) + except exception.ProcessExecutionError: + logging.exception("vblade stop threw an error, recovering") + yield self._execute("sleep 2") + yield self._execute("sudo vblade-persist stop %s %s" % + (shelf_id, blade_id), + check_exit_code=False) + try: + yield self._execute("sudo vblade-persist destroy %s %s" % + (shelf_id, blade_id)) + except exception.ProcessExecutionError: + logging.exception("vblade destroy threw an error, recovering") + yield self._execute("sleep 2") + yield self._execute("sudo vblade-persist destroy %s %s" % + (shelf_id, blade_id), + check_exit_code=False) @defer.inlineCallbacks def ensure_exports(self): """Runs all existing exports""" # NOTE(ja): wait for blades to appear - yield self._execute("sleep 5") + yield self._execute("sleep 2") yield self._execute("sudo vblade-persist auto all", check_exit_code=False) yield self._execute("sudo vblade-persist start all", -- cgit From f201f562fe79d09b0bbad42c4630ec8e4c76bf06 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 07:04:36 -0700 Subject: more error handling in volume driver code --- nova/volume/driver.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index a05e34e51..e8d11c74d 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -50,17 +50,26 @@ class AOEDriver(object): sizestr = '100M' else: sizestr = '%sG' % size - yield self._execute( - "sudo lvcreate -L %s -n %s %s" % (sizestr, - volume_name, - FLAGS.volume_group)) + yield self._execute("sudo lvcreate -L %s -n %s %s" % + (sizestr, + volume_name, + FLAGS.volume_group)) @defer.inlineCallbacks def delete_volume(self, volume_name): """Deletes a logical volume""" - yield self._execute( - "sudo lvremove -f %s/%s" % 
(FLAGS.volume_group, - volume_name)) + # NOTE(vish): Sometimes complains that the volume is still + # open, so delay and try again before failing + try: + yield self._execute("sudo lvremove -f %s/%s" % + (FLAGS.volume_group, + volume_name)) + except exception.ProcessExecutionError: + logging.exception("lvremove threw an error, recovering") + yield self._execute("sleep 2") + yield self._execute("sudo lvremove -f %s/%s" % + (FLAGS.volume_group, + volume_name)) @defer.inlineCallbacks def create_export(self, volume_name, shelf_id, blade_id): @@ -85,7 +94,8 @@ class AOEDriver(object): # NOTE(vish): These commands can partially fail sometimes, but # running them a second time on failure will usually # pick up the remaining tasks even though it also - # raises an exception + # raises an exception. We therefore ignore the + # failure on the second try. try: yield self._execute("sudo vblade-persist stop %s %s" % (shelf_id, blade_id)) -- cgit From 517348e33b8cc50e6a0d09f9112b7daab55b132c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 07:24:31 -0700 Subject: generalized retry into try_execute --- nova/volume/driver.py | 59 +++++++++++++++++---------------------------------- 1 file changed, 20 insertions(+), 39 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index e8d11c74d..a9ea5caa3 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -41,6 +41,19 @@ class AOEDriver(object): def __init__(self, execute=process.simple_execute, *args, **kwargs): self._execute = execute + @defer.inlineCallbacks + def _try_execute(self, command): + # NOTE(vish): Volume commands can partially fail due to timing, but + # running them a second time on failure will usually + # recover nicely. + try: + yield self._execute(command) + except exception.ProcessExecutionError: + logging.exception("Attempting to recover from a failed execute.") + yield self._execute("sleep 2") + yield self._execute(command) + + @defer.inlineCallbacks def create_volume(self, volume_name, size): """Creates a logical volume""" @@ -50,7 +63,7 @@ class AOEDriver(object): sizestr = '100M' else: sizestr = '%sG' % size - yield self._execute("sudo lvcreate -L %s -n %s %s" % + yield self._try_execute("sudo lvcreate -L %s -n %s %s" % (sizestr, volume_name, FLAGS.volume_group)) @@ -58,23 +71,14 @@ class AOEDriver(object): @defer.inlineCallbacks def delete_volume(self, volume_name): """Deletes a logical volume""" - # NOTE(vish): Sometimes complains that the volume is still - # open, so delay and try again before failing - try: - yield self._execute("sudo lvremove -f %s/%s" % - (FLAGS.volume_group, - volume_name)) - except exception.ProcessExecutionError: - logging.exception("lvremove threw an error, recovering") - yield self._execute("sleep 2") - yield self._execute("sudo lvremove -f %s/%s" % + yield self._try_execute("sudo lvremove -f %s/%s" % (FLAGS.volume_group, volume_name)) @defer.inlineCallbacks def create_export(self, volume_name, shelf_id, blade_id): """Creates an export for a logical volume""" - yield self._execute( + yield self._try_execute( "sudo vblade-persist setup %s %s %s /dev/%s/%s" % (shelf_id, blade_id, @@ -91,39 +95,16 @@ class AOEDriver(object): @defer.inlineCallbacks def remove_export(self, _volume_name, shelf_id, blade_id): """Removes an export for a logical volume""" - # NOTE(vish): These commands can partially fail sometimes, but - # running them a second time on failure will usually - # pick up the remaining tasks even though it also - # raises an exception. 
We therefore ignore the - # failure on the second try. - try: - yield self._execute("sudo vblade-persist stop %s %s" % + yield self._try_execute("sudo vblade-persist stop %s %s" % (shelf_id, blade_id)) - except exception.ProcessExecutionError: - logging.exception("vblade stop threw an error, recovering") - yield self._execute("sleep 2") - yield self._execute("sudo vblade-persist stop %s %s" % - (shelf_id, blade_id), - check_exit_code=False) - try: - yield self._execute("sudo vblade-persist destroy %s %s" % + yield self._try_execute("sudo vblade-persist destroy %s %s" % (shelf_id, blade_id)) - except exception.ProcessExecutionError: - logging.exception("vblade destroy threw an error, recovering") - yield self._execute("sleep 2") - yield self._execute("sudo vblade-persist destroy %s %s" % - (shelf_id, blade_id), - check_exit_code=False) @defer.inlineCallbacks def ensure_exports(self): """Runs all existing exports""" - # NOTE(ja): wait for blades to appear - yield self._execute("sleep 2") - yield self._execute("sudo vblade-persist auto all", - check_exit_code=False) - yield self._execute("sudo vblade-persist start all", - check_exit_code=False) + yield self._try_execute("sudo vblade-persist auto all") + yield self._try_execute("sudo vblade-persist start all") class FakeAOEDriver(AOEDriver): -- cgit From 0fd7cb594e5482d78fed8a026a24c4e1c8dac3bc Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 07:37:03 -0700 Subject: auto all and start all exceptions should be ignored --- nova/volume/driver.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index a9ea5caa3..7d5db4ab0 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -103,8 +103,18 @@ class AOEDriver(object): @defer.inlineCallbacks def ensure_exports(self): """Runs all existing exports""" - yield self._try_execute("sudo vblade-persist auto all") - yield self._try_execute("sudo vblade-persist start all") + # NOTE(vish): The standard _try_execute does not work here + # because these methods throw errors if other + # volumes on this host are in the process of + # being created. The good news is the command + # still works for the other volumes, so we + # just wait a bit for the current volume to + # be ready and ignore any errors. + yield self._execute("sleep 2") + yield self._execute("sudo vblade-persist auto all", + check_exit_code=False) + yield self._execute("sudo vblade-persist start all", + check_exit_code=False) class FakeAOEDriver(AOEDriver): -- cgit From ee766c9c8164ff526a9518c668ba08be4786ac35 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 08:06:44 -0700 Subject: flag for retries on volume commands --- nova/volume/driver.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 7d5db4ab0..a710ee3d6 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -34,6 +34,8 @@ flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') flags.DEFINE_string('aoe_eth_dev', 'eth0', 'Which device to export the volumes on') +flags.DEFINE_string('num_shell_tries', 3, + 'number of times to attempt to run flakey shell commands') class AOEDriver(object): @@ -46,12 +48,18 @@ class AOEDriver(object): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. 
- try: - yield self._execute(command) - except exception.ProcessExecutionError: - logging.exception("Attempting to recover from a failed execute.") - yield self._execute("sleep 2") - yield self._execute(command) + tries = 0 + while True: + try: + yield self._execute(command) + defer.returnValue(True) + except exception.ProcessExecutionError: + tries = tries + 1 + if tries >= FLAGS.num_shell_tries: + raise + logging.exception("Recovering from a failed execute." + "Try number %s", tries) + yield self._execute("sleep %s", tries ** 2) @defer.inlineCallbacks -- cgit From 10be00e16c6428bf3709590f13984246fdfaf14b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sun, 12 Sep 2010 08:16:59 -0700 Subject: fixed typo --- nova/volume/driver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index a710ee3d6..cca619550 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -59,7 +59,7 @@ class AOEDriver(object): raise logging.exception("Recovering from a failed execute." "Try number %s", tries) - yield self._execute("sleep %s", tries ** 2) + yield self._execute("sleep %s" % tries ** 2) @defer.inlineCallbacks -- cgit From 7190ad478b5e92a42d5109d01b5f178de2181127 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 28 Sep 2010 09:38:58 -0700 Subject: return a value if possible from export_device_create_safe --- nova/db/sqlalchemy/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index b6ac87901..8e6aa317b 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -677,8 +677,9 @@ def export_device_create_safe(_context, values): export_device_ref[key] = value try: export_device_ref.save() + return export_device_ref except IntegrityError: - pass + return None ################### -- cgit From 5e3da5864825a12da5a1ea1102a6efb6cebe204b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 1 Oct 2010 05:57:17 -0700 Subject: Fix the deprecation warnings for passing no context. Moved RequestContext out of nova.api, because it is used by everything Context is passed through the queue. Added some helper methods for converting to admin context. Added a few more fields to request context. 
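
The core of the change is a serialization round trip: callers build a
RequestContext once, rpc.cast() and rpc.call_twisted() attach it to the
message as '_context' via to_dict(), and AdapterConsumer rebuilds it with
from_dict() before dispatching, so manager methods receive a real context
instead of None. The sketch below is a hypothetical, trimmed-down version
of that round trip for illustration only; the field names follow the new
nova/context.py, but timestamps, request ids, and the lazy user/project
lookup are deliberately omitted:

    class RequestContext(object):
        """Trimmed-down illustration of the wire format used by nova.rpc."""

        def __init__(self, user, project, is_admin=False,
                     read_deleted=False, remote_address=None):
            self.user_id = user
            self.project_id = project
            self.is_admin = is_admin
            self.read_deleted = read_deleted
            self.remote_address = remote_address

        def to_dict(self):
            # Keys match the __init__ keyword arguments on purpose so
            # that from_dict() can rebuild the context with cls(**values).
            return {'user': self.user_id,
                    'project': self.project_id,
                    'is_admin': self.is_admin,
                    'read_deleted': self.read_deleted,
                    'remote_address': self.remote_address}

        @classmethod
        def from_dict(cls, values):
            return cls(**values)

    # Sender side, as in rpc.cast(): the context rides along in '_context'.
    ctxt = RequestContext('fake_user', 'fake_project')
    msg = {'method': 'run_instance', 'args': {'instance_id': 42}}
    msg['_context'] = ctxt.to_dict()

    # Consumer side, as in AdapterConsumer.receive(): rebuild the context,
    # then dispatch with context=rebuilt as a keyword argument.
    rebuilt = RequestContext.from_dict(msg.pop('_context'))
    assert rebuilt.project_id == 'fake_project'

This is also why the per-call "context": None entries disappear from the
rpc casts in the diff below: the consumer now supplies the rebuilt context
keyword itself.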
--- bin/nova-dhcpbridge | 20 +++++--- nova/api/cloud.py | 10 ++-- nova/api/context.py | 46 ----------------- nova/api/ec2/__init__.py | 7 ++- nova/api/ec2/cloud.py | 107 ++++++++++++++++++++------------------- nova/api/rackspace/context.py | 33 ------------ nova/api/rackspace/servers.py | 58 ++++++++++----------- nova/auth/manager.py | 20 +++++--- nova/cloudpipe/pipelib.py | 4 +- nova/context.py | 106 ++++++++++++++++++++++++++++++++++++++ nova/db/sqlalchemy/api.py | 3 +- nova/network/manager.py | 6 +-- nova/objectstore/handler.py | 12 ++--- nova/rpc.py | 15 ++++-- nova/scheduler/manager.py | 4 +- nova/service.py | 40 ++++++++------- nova/tests/access_unittest.py | 56 +++++++------------- nova/tests/api_unittest.py | 3 +- nova/tests/cloud_unittest.py | 28 +++++----- nova/tests/compute_unittest.py | 11 ++-- nova/tests/network_unittest.py | 66 ++++++++++++------------ nova/tests/quota_unittest.py | 14 ++--- nova/tests/rpc_unittest.py | 30 +++++++++-- nova/tests/scheduler_unittest.py | 22 ++++---- nova/tests/service_unittest.py | 24 +++++---- nova/tests/volume_unittest.py | 29 ++++++----- nova/virt/libvirt_conn.py | 27 ++++++---- 27 files changed, 433 insertions(+), 368 deletions(-) delete mode 100644 nova/api/context.py delete mode 100644 nova/api/rackspace/context.py create mode 100644 nova/context.py diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index a127ed03c..4574f0e20 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -33,6 +33,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) +from nova import context from nova import db from nova import flags from nova import rpc @@ -52,9 +53,12 @@ def add_lease(mac, ip_address, _hostname, _interface): if FLAGS.fake_rabbit: logging.debug("leasing ip") network_manager = utils.import_object(FLAGS.network_manager) - network_manager.lease_fixed_ip(None, mac, ip_address) + network_manager.lease_fixed_ip(context.get_admin_context(), + mac, + ip_address) else: - rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host), + rpc.cast(context.get_admin_context(), + "%s.%s" % (FLAGS.network_topic, FLAGS.host), {"method": "lease_fixed_ip", "args": {"context": None, "mac": mac, @@ -71,9 +75,12 @@ def del_lease(mac, ip_address, _hostname, _interface): if FLAGS.fake_rabbit: logging.debug("releasing ip") network_manager = utils.import_object(FLAGS.network_manager) - network_manager.release_fixed_ip(None, mac, ip_address) + network_manager.release_fixed_ip(context.get_admin_context(), + mac, + ip_address) else: - rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host), + rpc.cast(context.get_admin_context(), + "%s.%s" % (FLAGS.network_topic, FLAGS.host), {"method": "release_fixed_ip", "args": {"context": None, "mac": mac, @@ -82,8 +89,9 @@ def del_lease(mac, ip_address, _hostname, _interface): def init_leases(interface): """Get the list of hosts for an interface.""" - network_ref = db.network_get_by_bridge(None, interface) - return linux_net.get_dhcp_hosts(None, network_ref['id']) + ctxt = context.get_admin_context() + network_ref = db.network_get_by_bridge(ctxt, interface) + return linux_net.get_dhcp_hosts(ctxt, network_ref['id']) def main(): diff --git a/nova/api/cloud.py b/nova/api/cloud.py index 345677d4f..e16229e7d 100644 --- a/nova/api/cloud.py +++ b/nova/api/cloud.py @@ -30,13 +30,13 @@ FLAGS = flags.FLAGS def reboot(instance_id, context=None): """Reboot the given instance. 
- + #TODO(gundlach) not actually sure what context is used for by ec2 here -- I think we can just remove it and use None all the time. """ - instance_ref = db.instance_get_by_ec2_id(None, instance_id) + instance_ref = db.instance_get_by_ec2_id(context, instance_id) host = instance_ref['host'] - rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "reboot_instance", - "args": {"context": None, - "instance_id": instance_ref['id']}}) + "args": {"instance_id": instance_ref['id']}}) diff --git a/nova/api/context.py b/nova/api/context.py deleted file mode 100644 index b66cfe468..000000000 --- a/nova/api/context.py +++ /dev/null @@ -1,46 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -APIRequestContext -""" - -import random - - -class APIRequestContext(object): - def __init__(self, user, project): - self.user = user - self.project = project - self.request_id = ''.join( - [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-') - for x in xrange(20)] - ) - if user: - self.is_admin = user.is_admin() - else: - self.is_admin = False - self.read_deleted = False - - -def get_admin_context(user=None, read_deleted=False): - context_ref = APIRequestContext(user=user, project=None) - context_ref.is_admin = True - context_ref.read_deleted = read_deleted - return context_ref - diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 6b538a7f1..edc818c7d 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -25,9 +25,9 @@ import webob.dec import webob.exc from nova import exception +from nova import context from nova import flags from nova import wsgi -from nova.api import context from nova.api.ec2 import apirequest from nova.api.ec2 import admin from nova.api.ec2 import cloud @@ -78,7 +78,10 @@ class Authenticate(wsgi.Middleware): raise webob.exc.HTTPForbidden() # Authenticated! 
- req.environ['ec2.context'] = context.APIRequestContext(user, project) + ctxt = context.RequestContext(user=user, + project=project, + remote_address=req.remote_addr) + req.environ['ec2.context'] = ctxt return self.application diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 79c95788b..5e1de9dc0 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -28,6 +28,7 @@ import logging import os import time +from nova import context from nova import crypto from nova import db from nova import exception @@ -100,9 +101,9 @@ class CloudController(object): utils.runthis("Generating root CA: %s", "sh genrootca.sh") os.chdir(start) - def _get_mpi_data(self, project_id): + def _get_mpi_data(self, context, project_id): result = {} - for instance in db.instance_get_all_by_project(None, project_id): + for instance in db.instance_get_all_by_project(context, project_id): if instance['fixed_ip']: line = '%s slots=%d' % (instance['fixed_ip']['address'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) @@ -114,10 +115,11 @@ class CloudController(object): return result def get_metadata(self, address): - instance_ref = db.fixed_ip_get_instance(None, address) + ctxt = context.get_admin_context() + instance_ref = db.fixed_ip_get_instance(ctxt, address) if instance_ref is None: return None - mpi = self._get_mpi_data(instance_ref['project_id']) + mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) if instance_ref['key_name']: keys = { '0': { @@ -128,7 +130,7 @@ class CloudController(object): else: keys = '' hostname = instance_ref['hostname'] - floating_ip = db.instance_get_floating_address(None, + floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), @@ -136,7 +138,7 @@ class CloudController(object): 'ami-id': instance_ref['image_id'], 'ami-launch-index': instance_ref['launch_index'], 'ami-manifest-path': 'FIXME', - 'block-device-mapping': { # TODO(vish): replace with real data + 'block-device-mapping': { # TODO(vish): replace with real data 'ami': 'sda1', 'ephemeral0': 'sda2', 'root': '/dev/sda1', @@ -218,7 +220,7 @@ class CloudController(object): return {'keypairsSet': result} def create_key_pair(self, context, key_name, **kwargs): - data = _gen_key(None, context.user.id, key_name) + data = _gen_key(context, context.user.id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], 'keyMaterial': data['private_key']} @@ -247,11 +249,11 @@ class CloudController(object): def get_console_output(self, context, instance_id, **kwargs): # instance_id is passed in as a list of instances instance_ref = db.instance_get_by_ec2_id(context, instance_id[0]) - return rpc.call('%s.%s' % (FLAGS.compute_topic, + return rpc.call(context, + '%s.%s' % (FLAGS.compute_topic, instance_ref['host']), {"method": "get_console_output", - "args": {"context": None, - "instance_id": instance_ref['id']}}) + "args": {"instance_id": instance_ref['id']}}) def describe_volumes(self, context, **kwargs): if context.user.is_admin(): @@ -310,10 +312,10 @@ class CloudController(object): vol['display_description'] = kwargs.get('display_description') volume_ref = db.volume_create(context, vol) - rpc.cast(FLAGS.scheduler_topic, + rpc.cast(context, + FLAGS.scheduler_topic, {"method": "create_volume", - "args": {"context": None, - "topic": FLAGS.volume_topic, + "args": {"topic": FLAGS.volume_topic, "volume_id": volume_ref['id']}}) return {'volumeSet': [self._format_volume(context, volume_ref)]} @@ -328,10 +330,10 @@ 
class CloudController(object): raise exception.ApiError("Volume is already attached") instance_ref = db.instance_get_by_ec2_id(context, instance_id) host = instance_ref['host'] - rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "attach_volume", - "args": {"context": None, - "volume_id": volume_ref['id'], + "args": {"volume_id": volume_ref['id'], "instance_id": instance_ref['id'], "mountpoint": device}}) return {'attachTime': volume_ref['attach_time'], @@ -351,10 +353,10 @@ class CloudController(object): raise exception.ApiError("Volume is already detached") try: host = instance_ref['host'] - rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "detach_volume", - "args": {"context": None, - "instance_id": instance_ref['id'], + "args": {"instance_id": instance_ref['id'], "volume_id": volume_ref['id']}}) except exception.NotFound: # If the instance doesn't exist anymore, @@ -388,7 +390,7 @@ class CloudController(object): return self._format_describe_instances(context) def _format_describe_instances(self, context): - return { 'reservationSet': self._format_instances(context) } + return {'reservationSet': self._format_instances(context)} def _format_run_instances(self, context, reservation_id): i = self._format_instances(context, reservation_id) @@ -482,20 +484,20 @@ class CloudController(object): raise QuotaError("Address quota exceeded. You cannot " "allocate any more addresses") network_topic = self._get_network_topic(context) - public_ip = rpc.call(network_topic, + public_ip = rpc.call(context, + network_topic, {"method": "allocate_floating_ip", - "args": {"context": None, - "project_id": context.project.id}}) + "args": {"project_id": context.project.id}}) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): # NOTE(vish): Should we make sure this works? 
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) network_topic = self._get_network_topic(context) - rpc.cast(network_topic, + rpc.cast(context, + network_topic, {"method": "deallocate_floating_ip", - "args": {"context": None, - "floating_address": floating_ip_ref['address']}}) + "args": {"floating_address": floating_ip_ref['address']}}) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): @@ -504,20 +506,20 @@ class CloudController(object): instance_ref['id']) floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) network_topic = self._get_network_topic(context) - rpc.cast(network_topic, + rpc.cast(context, + network_topic, {"method": "associate_floating_ip", - "args": {"context": None, - "floating_address": floating_ip_ref['address'], + "args": {"floating_address": floating_ip_ref['address'], "fixed_address": fixed_address}}) return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) network_topic = self._get_network_topic(context) - rpc.cast(network_topic, + rpc.cast(context, + network_topic, {"method": "disassociate_floating_ip", - "args": {"context": None, - "floating_address": floating_ip_ref['address']}}) + "args": {"floating_address": floating_ip_ref['address']}}) return {'disassociateResponse': ["Address disassociated."]} def _get_network_topic(self, context): @@ -525,10 +527,10 @@ class CloudController(object): network_ref = db.project_get_network(context, context.project.id) host = network_ref['host'] if not host: - host = rpc.call(FLAGS.network_topic, + host = rpc.call(context, + FLAGS.network_topic, {"method": "set_network_host", - "args": {"context": None, - "project_id": context.project.id}}) + "args": {"project_id": context.project.id}}) return db.queue_get_for(context, FLAGS.network_topic, host) def run_instances(self, context, **kwargs): @@ -619,15 +621,15 @@ class CloudController(object): # TODO(vish): This probably should be done in the scheduler # network is setup when host is assigned network_topic = self._get_network_topic(context) - rpc.call(network_topic, + rpc.call(context, + network_topic, {"method": "setup_fixed_ip", - "args": {"context": None, - "address": address}}) + "args": {"address": address}}) - rpc.cast(FLAGS.scheduler_topic, + rpc.cast(context, + FLAGS.scheduler_topic, {"method": "run_instance", - "args": {"context": None, - "topic": FLAGS.compute_topic, + "args": {"topic": FLAGS.compute_topic, "instance_id": inst_id}}) logging.debug("Casting to scheduler for %s/%s's instance %s" % (context.project.name, context.user.name, inst_id)) @@ -658,10 +660,10 @@ class CloudController(object): # disassociated. We may need to worry about # checking this later. Perhaps in the scheduler? 
network_topic = self._get_network_topic(context) - rpc.cast(network_topic, + rpc.cast(context, + network_topic, {"method": "disassociate_floating_ip", - "args": {"context": None, - "floating_address": address}}) + "args": {"floating_address": address}}) address = db.instance_get_fixed_address(context, instance_ref['id']) @@ -674,10 +676,10 @@ class CloudController(object): host = instance_ref['host'] if host: - rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + rpc.cast(context, + db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "terminate_instance", - "args": {"context": None, - "instance_id": instance_ref['id']}}) + "args": {"instance_id": instance_ref['id']}}) else: db.instance_destroy(context, instance_ref['id']) return True @@ -695,9 +697,8 @@ class CloudController(object): if field in kwargs: changes[field] = kwargs[field] if changes: - db_context = {} - inst = db.instance_get_by_ec2_id(db_context, instance_id) - db.instance_update(db_context, inst['id'], kwargs) + inst = db.instance_get_by_ec2_id(context, instance_id) + db.instance_update(context, inst['id'], kwargs) return True def delete_volume(self, context, volume_id, **kwargs): @@ -708,10 +709,10 @@ class CloudController(object): now = datetime.datetime.utcnow() db.volume_update(context, volume_ref['id'], {'terminated_at': now}) host = volume_ref['host'] - rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host), + rpc.cast(context, + db.queue_get_for(context, FLAGS.volume_topic, host), {"method": "delete_volume", - "args": {"context": None, - "volume_id": volume_ref['id']}}) + "args": {"volume_id": volume_ref['id']}}) return True def describe_images(self, context, image_id=None, **kwargs): diff --git a/nova/api/rackspace/context.py b/nova/api/rackspace/context.py deleted file mode 100644 index 77394615b..000000000 --- a/nova/api/rackspace/context.py +++ /dev/null @@ -1,33 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -APIRequestContext -""" - -import random - -class Project(object): - def __init__(self, user_id): - self.id = user_id - -class APIRequestContext(object): - """ This is an adapter class to get around all of the assumptions made in - the FlatNetworking """ - def __init__(self, user_id): - self.user_id = user_id - self.project = Project(user_id) diff --git a/nova/api/rackspace/servers.py b/nova/api/rackspace/servers.py index 11efd8aef..866bebb44 100644 --- a/nova/api/rackspace/servers.py +++ b/nova/api/rackspace/servers.py @@ -24,9 +24,9 @@ from nova import flags from nova import rpc from nova import utils from nova import wsgi +from nova import context from nova.api import cloud from nova.api.rackspace import _id_translator -from nova.api.rackspace import context from nova.api.rackspace import faults from nova.compute import instance_types from nova.compute import power_state @@ -64,8 +64,8 @@ def _entity_list(entities): def _entity_detail(inst): """ Maps everything to Rackspace-like attributes for return""" - power_mapping = { - power_state.NOSTATE: 'build', + power_mapping = { + power_state.NOSTATE: 'build', power_state.RUNNING: 'active', power_state.BLOCKED: 'active', power_state.PAUSED: 'suspended', @@ -75,7 +75,7 @@ def _entity_detail(inst): } inst_dict = {} - mapped_keys = dict(status='state', imageId='image_id', + mapped_keys = dict(status='state', imageId='image_id', flavorId='instance_type', name='server_name', id='id') for k, v in mapped_keys.iteritems(): @@ -98,7 +98,7 @@ class Controller(wsgi.Controller): _serialization_metadata = { 'application/xml': { "attributes": { - "server": [ "id", "imageId", "name", "flavorId", "hostId", + "server": [ "id", "imageId", "name", "flavorId", "hostId", "status", "progress", "progress" ] } } @@ -164,11 +164,11 @@ class Controller(wsgi.Controller): inst = self._build_server_instance(req, env) except Exception, e: return faults.Fault(exc.HTTPUnprocessableEntity()) - - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst['id']}}) + user_id = req.environ['nova.context']['user']['id'] + rpc.cast(context.RequestContext(user_id, user_id), + FLAGS.compute_topic, + {"method": "run_instance", + "args": {"instance_id": inst['id']}}) return _entity_inst(inst) def update(self, req, id): @@ -178,7 +178,7 @@ class Controller(wsgi.Controller): user_id = req.environ['nova.context']['user']['id'] inst_dict = self._deserialize(req.body, req) - + if not inst_dict: return faults.Fault(exc.HTTPUnprocessableEntity()) @@ -186,12 +186,12 @@ class Controller(wsgi.Controller): if not instance or instance.user_id != user_id: return faults.Fault(exc.HTTPNotFound()) - self.db_driver.instance_update(None, id, + self.db_driver.instance_update(None, id, _filter_params(inst_dict['server'])) return faults.Fault(exc.HTTPNoContent()) def action(self, req, id): - """ multi-purpose method used to reboot, rebuild, and + """ multi-purpose method used to reboot, rebuild, and resize a server """ input_dict = self._deserialize(req.body, req) try: @@ -217,13 +217,13 @@ class Controller(wsgi.Controller): if v['flavorid'] == flavor_id][0] image_id = env['server']['imageId'] - + img_service, image_id_trans = _image_service() - opaque_image_id = image_id_trans.to_rs_id(image_id) + opaque_image_id = image_id_trans.to_rs_id(image_id) image = img_service.show(opaque_image_id) - if not image: + if not image: raise Exception, "Image not found" inst['server_name'] = env['server']['name'] @@ -259,15 +259,15 @@ class Controller(wsgi.Controller): ref = 
self.db_driver.instance_create(None, inst) inst['id'] = inst_id_trans.to_rs_id(ref.ec2_id) - + # TODO(dietz): this isn't explicitly necessary, but the networking # calls depend on an object with a project_id property, and therefore # should be cleaned up later - api_context = context.APIRequestContext(user_id) - + api_context = context.RequestContext(user_id) + inst['mac_address'] = utils.generate_mac() - - #TODO(dietz) is this necessary? + + #TODO(dietz) is this necessary? inst['launch_index'] = 0 inst['hostname'] = ref.ec2_id @@ -280,20 +280,20 @@ class Controller(wsgi.Controller): # TODO(vish): This probably should be done in the scheduler # network is setup when host is assigned network_topic = self._get_network_topic(user_id) - rpc.call(network_topic, + rpc.call(context.RequestContext(user_id, user_id), + network_topic, {"method": "setup_fixed_ip", - "args": {"context": None, - "address": address}}) + "args": {"address": address}}) return inst def _get_network_topic(self, user_id): """Retrieves the network host for a project""" - network_ref = self.db_driver.project_get_network(None, + network_ref = self.db_driver.project_get_network(None, user_id) host = network_ref['host'] if not host: - host = rpc.call(FLAGS.network_topic, - {"method": "set_network_host", - "args": {"context": None, - "project_id": user_id}}) + host = rpc.call(context.RequestContext(user_id, user_id), + FLAGS.network_topic, + {"method": "set_network_host", + "args": {"project_id": user_id}}) return self.db_driver.queue_get_for(None, FLAGS.network_topic, host) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 0bc12c80f..9d0c5c95c 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -28,6 +28,7 @@ import tempfile import uuid import zipfile +from nova import context from nova import crypto from nova import db from nova import exception @@ -454,7 +455,7 @@ class AuthManager(object): return [Project(**project_dict) for project_dict in project_list] def create_project(self, name, manager_user, description=None, - member_users=None, context=None): + member_users=None): """Create a project @type name: str @@ -485,7 +486,8 @@ class AuthManager(object): if project_dict: project = Project(**project_dict) try: - self.network_manager.allocate_network(context, + ctxt = context.get_admin_context() + self.network_manager.allocate_network(ctxt, project.id) except: drv.delete_project(project.id) @@ -537,7 +539,7 @@ class AuthManager(object): Project.safe_id(project)) @staticmethod - def get_project_vpn_data(project, context=None): + def get_project_vpn_data(project): """Gets vpn ip and port for project @type project: Project or project_id @@ -548,7 +550,7 @@ class AuthManager(object): not been allocated for user. 
""" - network_ref = db.project_get_network(context, + network_ref = db.project_get_network(context.get_admin_context(), Project.safe_id(project)) if not network_ref['vpn_public_port']: @@ -556,12 +558,13 @@ class AuthManager(object): return (network_ref['vpn_public_address'], network_ref['vpn_public_port']) - def delete_project(self, project, context=None): + def delete_project(self, project): """Deletes a project""" try: - network_ref = db.project_get_network(context, + ctxt = context.get_admin_context() + network_ref = db.project_get_network(ctxt, Project.safe_id(project)) - db.network_destroy(context, network_ref['id']) + db.network_destroy(ctxt, network_ref['id']) except: logging.exception('Could not destroy network for %s', project) @@ -626,7 +629,8 @@ class AuthManager(object): Additionally deletes all users key_pairs""" uid = User.safe_id(user) - db.key_pair_destroy_all_by_user(None, uid) + db.key_pair_destroy_all_by_user(context.get_admin_context(), + uid) with self.driver() as drv: drv.delete_user(uid) diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 706a175d9..4fc2c85cb 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -28,13 +28,13 @@ import os import tempfile import zipfile +from nova import context from nova import exception from nova import flags from nova import utils from nova.auth import manager # TODO(eday): Eventually changes these to something not ec2-specific from nova.api.ec2 import cloud -from nova.api.ec2 import context FLAGS = flags.FLAGS @@ -62,7 +62,7 @@ class CloudPipe(object): key_name = self.setup_key_pair(project.project_manager_id, project_id) zippy = open(zippath, "r") - context = context.APIRequestContext(user=project.project_manager, project=project) + context = context.RequestContext(user=project.project_manager, project=project) reservation = self.controller.run_instances(context, # run instances expects encoded userdata, it is decoded in the get_metadata_call diff --git a/nova/context.py b/nova/context.py new file mode 100644 index 000000000..3f9de519d --- /dev/null +++ b/nova/context.py @@ -0,0 +1,106 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +RequestContext: context for requests that persist through all of nova. 
+""" + +import datetime +import random + +from nova import utils + +class RequestContext(object): + def __init__(self, user, project, is_admin=None, read_deleted=False, + remote_address=None, timestamp=None, request_id=None): + if hasattr(user, 'id'): + self._user = user + self.user_id = user.id + else: + self._user = None + self.user_id = user + if hasattr(project, 'id'): + self._project = project + self.project_id = project.id + else: + self._project = None + self.project_id = project + if is_admin is None: + if not user: + user = self.user + self.is_admin = user.is_admin() + else: + self.is_admin = is_admin + self.read_deleted = read_deleted + self.remote_address = remote_address + if not timestamp: + timestamp = datetime.datetime.utcnow() + if isinstance(timestamp, str): + timestamp = utils.parse_isotime(timestamp) + self.timestamp = timestamp + if not request_id: + request_id = ''.join( + [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-') + for x in xrange(20)] + ) + self.request_id = request_id + + @property + def user(self): + # NOTE(visn): Delay import of manager, so that we can import this + # file from manager. + from nova.auth import manager + if not self._user: + self._user = manager.AuthManager().get_user(self.user_id) + return self._user + + @property + def project(self): + # NOTE(visn): Delay import of manager, so that we can import this + # file from manager. + from nova.auth import manager + if not self._project: + self._project = manager.AuthManager().get_project(self.project_id) + return self._project + + def to_dict(self): + return {'user': self.user_id, + 'project': self.project_id, + 'is_admin': self.is_admin, + 'read_deleted': self.read_deleted, + 'remote_address': self.remote_address, + 'timestamp': utils.isotime(), + 'request_id': self.request_id} + + @classmethod + def from_dict(cls, values): + return cls(**values) + + def admin(self, read_deleted=False): + """Return a version of this context with admin flag set""" + return RequestContext(self.user_id, + self.project_id, + True, + read_deleted, + self.remote_address, + self.timestamp, + self.request_id) + + +def get_admin_context(read_deleted=False): + return RequestContext(None, None, True, read_deleted) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 7f72f66b9..0fef49090 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -40,6 +40,7 @@ def is_admin_context(context): if not context: warnings.warn('Use of empty request context is deprecated', DeprecationWarning) + raise Exception('die') return True return context.is_admin @@ -321,7 +322,7 @@ def floating_ip_destroy(context, address): session = get_session() with session.begin(): # TODO(devcamcar): Ensure address belongs to user. - floating_ip_ref = get_floating_ip_by_address(context, + floating_ip_ref = floating_ip_get_by_address(context, address, session=session) floating_ip_ref.delete(session=session) diff --git a/nova/network/manager.py b/nova/network/manager.py index ef1d01138..25956b267 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -92,7 +92,7 @@ class NetworkManager(manager.Manager): # TODO(vish): can we minimize db access by just getting the # id here instead of the ref? 
network_id = network_ref['id'] - host = self.db.network_set_host(None, + host = self.db.network_set_host(context, network_id, self.host) self._on_set_network_host(context, network_id) @@ -180,7 +180,7 @@ class FlatManager(NetworkManager): def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): """Gets a fixed ip from the pool""" network_ref = self.db.project_get_network(context, context.project.id) - address = self.db.fixed_ip_associate_pool(context, + address = self.db.fixed_ip_associate_pool(context.admin(), network_ref['id'], instance_id) self.db.fixed_ip_update(context, address, {'allocated': True}) @@ -249,7 +249,7 @@ class VlanManager(NetworkManager): address = network_ref['vpn_private_address'] self.db.fixed_ip_associate(context, address, instance_id) else: - address = self.db.fixed_ip_associate_pool(None, + address = self.db.fixed_ip_associate_pool(context.admin(), network_ref['id'], instance_id) self.db.fixed_ip_update(context, address, {'allocated': True}) diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index dfee64aca..b93e92fe6 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -52,10 +52,10 @@ from twisted.web import resource from twisted.web import server from twisted.web import static +from nova import context from nova import exception from nova import flags from nova.auth import manager -from nova.api.ec2 import context from nova.objectstore import bucket from nova.objectstore import image @@ -131,7 +131,7 @@ def get_context(request): request.uri, headers=request.getAllHeaders(), check_type='s3') - return context.APIRequestContext(user, project) + return context.RequestContext(user, project) except exception.Error as ex: logging.debug("Authentication Failure: %s", ex) raise exception.NotAuthorized @@ -144,7 +144,7 @@ class ErrorHandlingResource(resource.Resource): # plugged in to the right place in twisted... # This doesn't look like it's the right place # (consider exceptions in getChild; or after - # NOT_DONE_YET is returned + # NOT_DONE_YET is returned def render(self, request): """Renders the response as XML""" try: @@ -255,7 +255,7 @@ class ObjectResource(ErrorHandlingResource): def render_GET(self, request): """Returns the object - + Raises NotAuthorized if user in request context is not authorized to delete the object. """ @@ -273,7 +273,7 @@ class ObjectResource(ErrorHandlingResource): def render_PUT(self, request): """Modifies/inserts the object and returns a result code - + Raises NotAuthorized if user in request context is not authorized to delete the object. """ @@ -291,7 +291,7 @@ class ObjectResource(ErrorHandlingResource): def render_DELETE(self, request): """Deletes the object and returns a result code - + Raises NotAuthorized if user in request context is not authorized to delete the object. 
""" diff --git a/nova/rpc.py b/nova/rpc.py index fe52ad35f..26eff9c55 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -34,7 +34,7 @@ from twisted.internet import task from nova import exception from nova import fakerabbit from nova import flags - +from nova import context FLAGS = flags.FLAGS @@ -151,6 +151,8 @@ class AdapterConsumer(TopicConsumer): """ LOG.debug('received %s' % (message_data)) msg_id = message_data.pop('_msg_id', None) + dict_context = message_data.pop('_context') + ctxt = context.RequestContext.from_dict(dict_context) method = message_data.get('method') args = message_data.get('args', {}) @@ -168,7 +170,7 @@ class AdapterConsumer(TopicConsumer): node_args = dict((str(k), v) for k, v in args.iteritems()) # NOTE(vish): magic is fun! # pylint: disable-msg=W0142 - d = defer.maybeDeferred(node_func, **node_args) + d = defer.maybeDeferred(node_func, context=ctxt, **node_args) if msg_id: d.addCallback(lambda rval: msg_reply(msg_id, rval, None)) d.addErrback(lambda e: msg_reply(msg_id, None, e)) @@ -247,12 +249,13 @@ class RemoteError(exception.Error): traceback)) -def call(topic, msg): +def call(context, topic, msg): """Sends a message on a topic and wait for a response""" LOG.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug("MSG_ID is %s" % (msg_id)) + msg.update({'_context': context}) class WaitMessage(object): @@ -282,12 +285,13 @@ def call(topic, msg): return wait_msg.result -def call_twisted(topic, msg): +def call_twisted(context, topic, msg): """Sends a message on a topic and wait for a response""" LOG.debug("Making asynchronous call...") msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug("MSG_ID is %s" % (msg_id)) + msg.update({'_context': context.to_dict()}) conn = Connection.instance() d = defer.Deferred() @@ -313,9 +317,10 @@ def call_twisted(topic, msg): return d -def cast(topic, msg): +def cast(context, topic, msg): """Sends a message on a topic without waiting for a response""" LOG.debug("Making asynchronous cast...") + msg.update({'_context': context.to_dict()}) conn = Connection.instance() publisher = TopicPublisher(connection=conn, topic=topic) publisher.send(msg) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 0ad7ca86b..6a933fb37 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -59,8 +59,8 @@ class SchedulerManager(manager.Manager): except AttributeError: host = self.driver.schedule(context, topic, *args, **kwargs) - kwargs.update({"context": None}) - rpc.cast(db.queue_get_for(context, topic, host), + rpc.cast(context, + db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) logging.debug("Casting to %s %s for %s", topic, host, method) diff --git a/nova/service.py b/nova/service.py index a6c186896..609df81d1 100644 --- a/nova/service.py +++ b/nova/service.py @@ -28,6 +28,7 @@ from twisted.internet import defer from twisted.internet import task from twisted.application import service +from nova import context from nova import db from nova import exception from nova import flags @@ -57,20 +58,22 @@ class Service(object, service.Service): self.manager.init_host() self.model_disconnected = False super(Service, self).__init__(*args, **kwargs) + ctxt = context.get_admin_context() try: - service_ref = db.service_get_by_args(None, - self.host, - self.binary) + service_ref = db.service_get_by_args(ctxt, + self.host, + self.binary) self.service_id = service_ref['id'] except exception.NotFound: - self._create_service_ref() + 
self._create_service_ref(ctxt) - def _create_service_ref(self): - service_ref = db.service_create(None, {'host': self.host, - 'binary': self.binary, - 'topic': self.topic, - 'report_count': 0}) + def _create_service_ref(self, context): + service_ref = db.service_create(context, + {'host': self.host, + 'binary': self.binary, + 'topic': self.topic, + 'report_count': 0}) self.service_id = service_ref['id'] def __getattr__(self, key): @@ -136,31 +139,32 @@ class Service(object, service.Service): service_obj.setServiceParent(application) return application - def kill(self, context=None): + def kill(self): """Destroy the service object in the datastore""" try: - db.service_destroy(context, self.service_id) + db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: logging.warn("Service killed that has no database entry") @defer.inlineCallbacks - def periodic_tasks(self, context=None): + def periodic_tasks(self): """Tasks to be run at a periodic interval""" - yield self.manager.periodic_tasks(context) + yield self.manager.periodic_tasks(context.get_admin_context()) @defer.inlineCallbacks - def report_state(self, context=None): + def report_state(self): """Update the state of this service in the datastore.""" + ctxt = context.get_admin_context() try: try: - service_ref = db.service_get(context, self.service_id) + service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: logging.debug("The service database object disappeared, " "Recreating it.") - self._create_service_ref() - service_ref = db.service_get(context, self.service_id) + self._create_service_ref(ctxt) + service_ref = db.service_get(ctxt, self.service_id) - db.service_update(context, + db.service_update(ctxt, self.service_id, {'report_count': service_ref['report_count'] + 1}) diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py index 4b40ffd0a..8167259c4 100644 --- a/nova/tests/access_unittest.py +++ b/nova/tests/access_unittest.py @@ -20,6 +20,7 @@ import unittest import logging import webob +from nova import context from nova import exception from nova import flags from nova import test @@ -35,44 +36,25 @@ class AccessTestCase(test.TrialTestCase): def setUp(self): super(AccessTestCase, self).setUp() um = manager.AuthManager() + self.context = context.get_admin_context() # Make test users - try: - self.testadmin = um.create_user('testadmin') - except Exception, err: - logging.error(str(err)) - try: - self.testpmsys = um.create_user('testpmsys') - except: pass - try: - self.testnet = um.create_user('testnet') - except: pass - try: - self.testsys = um.create_user('testsys') - except: pass + self.testadmin = um.create_user('testadmin') + self.testpmsys = um.create_user('testpmsys') + self.testnet = um.create_user('testnet') + self.testsys = um.create_user('testsys') # Assign some rules - try: - um.add_role('testadmin', 'cloudadmin') - except: pass - try: - um.add_role('testpmsys', 'sysadmin') - except: pass - try: - um.add_role('testnet', 'netadmin') - except: pass - try: - um.add_role('testsys', 'sysadmin') - except: pass + um.add_role('testadmin', 'cloudadmin') + um.add_role('testpmsys', 'sysadmin') + um.add_role('testnet', 'netadmin') + um.add_role('testsys', 'sysadmin') # Make a test project - try: - self.project = um.create_project('testproj', 'testpmsys', 'a test project', ['testpmsys', 'testnet', 'testsys']) - except: pass - try: - self.project.add_role(self.testnet, 'netadmin') - except: pass - try: - self.project.add_role(self.testsys, 'sysadmin') - 
except: pass + self.project = um.create_project('testproj', + 'testpmsys', + 'a test project', + ['testpmsys', 'testnet', 'testsys']) + self.project.add_role(self.testnet, 'netadmin') + self.project.add_role(self.testsys, 'sysadmin') #user is set in each test def noopWSGIApp(environ, start_response): start_response('200 OK', []) @@ -97,10 +79,8 @@ class AccessTestCase(test.TrialTestCase): super(AccessTestCase, self).tearDown() def response_status(self, user, methodName): - context = Context() - context.project = self.project - context.user = user - environ = {'ec2.context' : context, + ctxt = context.RequestContext(user, self.project) + environ = {'ec2.context' : ctxt, 'ec2.controller': 'some string', 'ec2.action': methodName} req = webob.Request.blank('/', environ) diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py index c040cdad3..7c745ca41 100644 --- a/nova/tests/api_unittest.py +++ b/nova/tests/api_unittest.py @@ -25,6 +25,7 @@ import random import StringIO import webob +from nova import context from nova import flags from nova import test from nova import api @@ -131,7 +132,7 @@ class ApiEc2TestCase(test.BaseTestCase): user = self.manager.create_user('fake', 'fake', 'fake') project = self.manager.create_project('fake', 'fake', 'fake') # NOTE(vish): create depends on pool, so call helper directly - cloud._gen_key(None, user.id, keyname) + cloud._gen_key(context.get_admin_context(), user.id, keyname) rv = self.ec2.get_all_key_pairs() results = [k for k in rv if k.name == keyname] diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py index ae7dea1db..8125f4a78 100644 --- a/nova/tests/cloud_unittest.py +++ b/nova/tests/cloud_unittest.py @@ -29,6 +29,7 @@ from twisted.internet import defer import unittest from xml.etree import ElementTree +from nova import context from nova import crypto from nova import db from nova import flags @@ -37,7 +38,6 @@ from nova import test from nova import utils from nova.auth import manager from nova.compute import power_state -from nova.api.ec2 import context from nova.api.ec2 import cloud from nova.objectstore import image @@ -72,7 +72,7 @@ class CloudTestCase(test.TrialTestCase): self.manager = manager.AuthManager() self.user = self.manager.create_user('admin', 'admin', 'admin', True) self.project = self.manager.create_project('proj', 'admin', 'proj') - self.context = context.APIRequestContext(user=self.user, + self.context = context.RequestContext(user=self.user, project=self.project) def tearDown(self): @@ -235,33 +235,33 @@ class CloudTestCase(test.TrialTestCase): self.assertEqual('', img.metadata['description']) def test_update_of_instance_display_fields(self): - inst = db.instance_create({}, {}) + inst = db.instance_create(self.context, {}) self.cloud.update_instance(self.context, inst['ec2_id'], display_name='c00l 1m4g3') - inst = db.instance_get({}, inst['id']) + inst = db.instance_get(self.context, inst['id']) self.assertEqual('c00l 1m4g3', inst['display_name']) - db.instance_destroy({}, inst['id']) + db.instance_destroy(self.context, inst['id']) def test_update_of_instance_wont_update_private_fields(self): - inst = db.instance_create({}, {}) + inst = db.instance_create(self.context, {}) self.cloud.update_instance(self.context, inst['id'], mac_address='DE:AD:BE:EF') - inst = db.instance_get({}, inst['id']) + inst = db.instance_get(self.context, inst['id']) self.assertEqual(None, inst['mac_address']) - db.instance_destroy({}, inst['id']) + db.instance_destroy(self.context, inst['id']) def 
test_update_of_volume_display_fields(self): - vol = db.volume_create({}, {}) + vol = db.volume_create(self.context, {}) self.cloud.update_volume(self.context, vol['id'], display_name='c00l v0lum3') - vol = db.volume_get({}, vol['id']) + vol = db.volume_get(self.context, vol['id']) self.assertEqual('c00l v0lum3', vol['display_name']) - db.volume_destroy({}, vol['id']) + db.volume_destroy(self.context, vol['id']) def test_update_of_volume_wont_update_private_fields(self): - vol = db.volume_create({}, {}) + vol = db.volume_create(self.context, {}) self.cloud.update_volume(self.context, vol['id'], mountpoint='/not/here') - vol = db.volume_get({}, vol['id']) + vol = db.volume_get(self.context, vol['id']) self.assertEqual(None, vol['mountpoint']) - db.volume_destroy({}, vol['id']) + db.volume_destroy(self.context, vol['id']) diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 1e2bb113b..ec24b2537 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -24,13 +24,13 @@ import logging from twisted.internet import defer +from nova import context from nova import db from nova import exception from nova import flags from nova import test from nova import utils from nova.auth import manager -from nova.api import context FLAGS = flags.FLAGS @@ -45,7 +45,7 @@ class ComputeTestCase(test.TrialTestCase): self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = None + self.context = context.get_admin_context() def tearDown(self): # pylint: disable-msg=C0103 self.manager.delete_user(self.user) @@ -72,13 +72,13 @@ class ComputeTestCase(test.TrialTestCase): yield self.compute.run_instance(self.context, instance_id) - instances = db.instance_get_all(None) + instances = db.instance_get_all(context.get_admin_context()) logging.info("Running instances: %s", instances) self.assertEqual(len(instances), 1) yield self.compute.terminate_instance(self.context, instance_id) - instances = db.instance_get_all(None) + instances = db.instance_get_all(context.get_admin_context()) logging.info("After terminating instances: %s", instances) self.assertEqual(len(instances), 0) @@ -96,8 +96,7 @@ class ComputeTestCase(test.TrialTestCase): self.assertEqual(instance_ref['deleted_at'], None) terminate = datetime.datetime.utcnow() yield self.compute.terminate_instance(self.context, instance_id) - self.context = context.get_admin_context(user=self.user, - read_deleted=True) + self.context = self.context.admin(True) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['deleted_at'] > terminate) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index 5370966d2..85fc2e2a1 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -22,13 +22,13 @@ import IPy import os import logging +from nova import context from nova import db from nova import exception from nova import flags from nova import test from nova import utils from nova.auth import manager -from nova.api.ec2 import context FLAGS = flags.FLAGS @@ -49,16 +49,14 @@ class NetworkTestCase(test.TrialTestCase): self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] self.network = utils.import_object(FLAGS.network_manager) - self.context = context.APIRequestContext(project=None, user=self.user) + self.context = 
context.RequestContext(project=None, user=self.user) for i in range(5): name = 'project%s' % i self.projects.append(self.manager.create_project(name, 'netuser', name)) # create the necessary network data for the project - user_context = context.APIRequestContext(project=self.projects[i], - user=self.user) - self.network.set_network_host(user_context, self.projects[i].id) + self.network.set_network_host(context.get_admin_context(), self.projects[i].id) instance_ref = self._create_instance(0) self.instance_id = instance_ref['id'] instance_ref = self._create_instance(1) @@ -68,8 +66,8 @@ class NetworkTestCase(test.TrialTestCase): super(NetworkTestCase, self).tearDown() # TODO(termie): this should really be instantiating clean datastores # in between runs, one failure kills all the tests - db.instance_destroy(None, self.instance_id) - db.instance_destroy(None, self.instance2_id) + db.instance_destroy(context.get_admin_context(), self.instance_id) + db.instance_destroy(context.get_admin_context(), self.instance2_id) for project in self.projects: self.manager.delete_project(project) self.manager.delete_user(self.user) @@ -78,7 +76,7 @@ class NetworkTestCase(test.TrialTestCase): if not mac: mac = utils.generate_mac() project = self.projects[project_num] - self.context.project = project + self.context._project = project return db.instance_create(self.context, {'project_id': project.id, 'mac_address': mac}) @@ -87,34 +85,34 @@ class NetworkTestCase(test.TrialTestCase): """Create an address in given project num""" if instance_id is None: instance_id = self.instance_id - self.context.project = self.projects[project_num] + self.context._project = self.projects[project_num] return self.network.allocate_fixed_ip(self.context, instance_id) def _deallocate_address(self, project_num, address): - self.context.project = self.projects[project_num] + self.context._project = self.projects[project_num] self.network.deallocate_fixed_ip(self.context, address) def test_public_network_association(self): """Makes sure that we can allocaate a public ip""" # TODO(vish): better way of adding floating ips - self.context.project = self.projects[0] + self.context._project = self.projects[0] pubnet = IPy.IP(flags.FLAGS.public_range) address = str(pubnet[0]) try: - db.floating_ip_get_by_address(None, address) + db.floating_ip_get_by_address(context.get_admin_context(), address) except exception.NotFound: - db.floating_ip_create(None, {'address': address, + db.floating_ip_create(context.get_admin_context(), {'address': address, 'host': FLAGS.host}) float_addr = self.network.allocate_floating_ip(self.context, self.projects[0].id) fix_addr = self._create_address(0) self.assertEqual(float_addr, str(pubnet[0])) self.network.associate_floating_ip(self.context, float_addr, fix_addr) - address = db.instance_get_floating_address(None, self.instance_id) + address = db.instance_get_floating_address(context.get_admin_context(), self.instance_id) self.assertEqual(address, float_addr) self.network.disassociate_floating_ip(self.context, float_addr) - address = db.instance_get_floating_address(None, self.instance_id) + address = db.instance_get_floating_address(context.get_admin_context(), self.instance_id) self.assertEqual(address, None) self.network.deallocate_floating_ip(self.context, float_addr) self.network.deallocate_fixed_ip(self.context, fix_addr) @@ -175,7 +173,7 @@ class NetworkTestCase(test.TrialTestCase): lease_ip(address) lease_ip(address2) lease_ip(address3) - self.context.project = self.projects[i] + self.context._project = 
self.projects[i] self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) self.assertFalse(is_allocated_in_project(address2, @@ -189,7 +187,7 @@ class NetworkTestCase(test.TrialTestCase): release_ip(address2) release_ip(address3) for instance_id in instance_ids: - db.instance_destroy(None, instance_id) + db.instance_destroy(context.get_admin_context(), instance_id) release_ip(first) self._deallocate_address(0, first) @@ -203,7 +201,7 @@ class NetworkTestCase(test.TrialTestCase): def test_too_many_networks(self): """Ensure error is raised if we run out of networks""" projects = [] - networks_left = FLAGS.num_networks - db.network_count(None) + networks_left = FLAGS.num_networks - db.network_count(context.get_admin_context()) for i in range(networks_left): project = self.manager.create_project('many%s' % i, self.user) projects.append(project) @@ -236,18 +234,18 @@ class NetworkTestCase(test.TrialTestCase): There are ips reserved at the bottom and top of the range. services (network, gateway, CloudPipe, broadcast) """ - network = db.project_get_network(None, self.projects[0].id) + network = db.project_get_network(context.get_admin_context(), self.projects[0].id) net_size = flags.FLAGS.network_size - total_ips = (db.network_count_available_ips(None, network['id']) + - db.network_count_reserved_ips(None, network['id']) + - db.network_count_allocated_ips(None, network['id'])) + total_ips = (db.network_count_available_ips(context.get_admin_context(), network['id']) + + db.network_count_reserved_ips(context.get_admin_context(), network['id']) + + db.network_count_allocated_ips(context.get_admin_context(), network['id'])) self.assertEqual(total_ips, net_size) def test_too_many_addresses(self): """Test for a NoMoreAddresses exception when all fixed ips are used. 
""" - network = db.project_get_network(None, self.projects[0].id) - num_available_ips = db.network_count_available_ips(None, + network = db.project_get_network(context.get_admin_context(), self.projects[0].id) + num_available_ips = db.network_count_available_ips(context.get_admin_context(), network['id']) addresses = [] instance_ids = [] @@ -258,7 +256,7 @@ class NetworkTestCase(test.TrialTestCase): addresses.append(address) lease_ip(address) - self.assertEqual(db.network_count_available_ips(None, + self.assertEqual(db.network_count_available_ips(context.get_admin_context(), network['id']), 0) self.assertRaises(db.NoMoreAddresses, self.network.allocate_fixed_ip, @@ -268,17 +266,17 @@ class NetworkTestCase(test.TrialTestCase): for i in range(num_available_ips): self.network.deallocate_fixed_ip(self.context, addresses[i]) release_ip(addresses[i]) - db.instance_destroy(None, instance_ids[i]) - self.assertEqual(db.network_count_available_ips(None, + db.instance_destroy(context.get_admin_context(), instance_ids[i]) + self.assertEqual(db.network_count_available_ips(context.get_admin_context(), network['id']), num_available_ips) def is_allocated_in_project(address, project_id): """Returns true if address is in specified project""" - project_net = db.project_get_network(None, project_id) - network = db.fixed_ip_get_network(None, address) - instance = db.fixed_ip_get_instance(None, address) + project_net = db.project_get_network(context.get_admin_context(), project_id) + network = db.fixed_ip_get_network(context.get_admin_context(), address) + instance = db.fixed_ip_get_instance(context.get_admin_context(), address) # instance exists until release return instance is not None and network['id'] == project_net['id'] @@ -290,8 +288,8 @@ def binpath(script): def lease_ip(private_ip): """Run add command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(None, private_ip) - instance_ref = db.fixed_ip_get_instance(None, private_ip) + network_ref = db.fixed_ip_get_network(context.get_admin_context(), private_ip) + instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip) cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'), instance_ref['mac_address'], private_ip) @@ -304,8 +302,8 @@ def lease_ip(private_ip): def release_ip(private_ip): """Run del command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(None, private_ip) - instance_ref = db.fixed_ip_get_instance(None, private_ip) + network_ref = db.fixed_ip_get_network(context.get_admin_context(), private_ip) + instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip) cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'), instance_ref['mac_address'], private_ip) diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py index 370ccd506..72e44bf52 100644 --- a/nova/tests/quota_unittest.py +++ b/nova/tests/quota_unittest.py @@ -18,6 +18,7 @@ import logging +from nova import context from nova import db from nova import exception from nova import flags @@ -26,7 +27,6 @@ from nova import test from nova import utils from nova.auth import manager from nova.api.ec2 import cloud -from nova.api.ec2 import context FLAGS = flags.FLAGS @@ -48,8 +48,8 @@ class QuotaTestCase(test.TrialTestCase): self.user = self.manager.create_user('admin', 'admin', 'admin', True) self.project = self.manager.create_project('admin', 'admin', 'admin') self.network = utils.import_object(FLAGS.network_manager) - self.context = context.APIRequestContext(project=self.project, - user=self.user) + self.context = 
context.RequestContext(project=self.project, + user=self.user) def tearDown(self): # pylint: disable-msg=C0103 manager.AuthManager().delete_project(self.project) @@ -94,7 +94,7 @@ class QuotaTestCase(test.TrialTestCase): for i in range(FLAGS.quota_instances): instance_id = self._create_instance() instance_ids.append(instance_id) - self.assertRaises(cloud.QuotaError, self.cloud.run_instances, + self.assertRaises(cloud.QuotaError, self.cloud.run_instances, self.context, min_count=1, max_count=1, @@ -106,7 +106,7 @@ class QuotaTestCase(test.TrialTestCase): instance_ids = [] instance_id = self._create_instance(cores=4) instance_ids.append(instance_id) - self.assertRaises(cloud.QuotaError, self.cloud.run_instances, + self.assertRaises(cloud.QuotaError, self.cloud.run_instances, self.context, min_count=1, max_count=1, @@ -139,9 +139,9 @@ class QuotaTestCase(test.TrialTestCase): def test_too_many_addresses(self): address = '192.168.0.100' try: - db.floating_ip_get_by_address(None, address) + db.floating_ip_get_by_address(context.get_admin_context(), address) except exception.NotFound: - db.floating_ip_create(None, {'address': address, + db.floating_ip_create(context.get_admin_context(), {'address': address, 'host': FLAGS.host}) float_addr = self.network.allocate_floating_ip(self.context, self.project.id) diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py index 9652841f2..5d2bb1046 100644 --- a/nova/tests/rpc_unittest.py +++ b/nova/tests/rpc_unittest.py @@ -22,6 +22,7 @@ import logging from twisted.internet import defer +from nova import context from nova import flags from nova import rpc from nova import test @@ -40,14 +41,24 @@ class RpcTestCase(test.TrialTestCase): topic='test', proxy=self.receiver) self.consumer.attach_to_twisted() + self.context= context.get_admin_context() def test_call_succeed(self): """Get a value through rpc call""" value = 42 - result = yield rpc.call_twisted('test', {"method": "echo", + result = yield rpc.call_twisted(self.context, + 'test', {"method": "echo", "args": {"value": value}}) self.assertEqual(value, result) + def test_context_passed(self): + """Makes sure a context is passed through rpc call""" + value = 42 + result = yield rpc.call_twisted(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + def test_call_exception(self): """Test that exception gets passed back properly @@ -56,11 +67,13 @@ class RpcTestCase(test.TrialTestCase): to an int in the test. 
""" value = 42 - self.assertFailure(rpc.call_twisted('test', {"method": "fail", + self.assertFailure(rpc.call_twisted(self.context, + 'test', {"method": "fail", "args": {"value": value}}), rpc.RemoteError) try: - yield rpc.call_twisted('test', {"method": "fail", + yield rpc.call_twisted(self.context, + 'test', {"method": "fail", "args": {"value": value}}) self.fail("should have thrown rpc.RemoteError") except rpc.RemoteError as exc: @@ -73,12 +86,19 @@ class TestReceiver(object): Uses static methods because we aren't actually storing any state""" @staticmethod - def echo(value): + def echo(context, value): """Simply returns whatever value is sent in""" logging.debug("Received %s", value) return defer.succeed(value) @staticmethod - def fail(value): + def context(context, value): + """Returns dictionary version of context""" + logging.debug("Received %s", context) + return defer.succeed(context.to_dict()) + + @staticmethod + def fail(context, value): """Raises an exception with the value sent in""" raise Exception(value) + diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py index fde30f81e..027b62987 100644 --- a/nova/tests/scheduler_unittest.py +++ b/nova/tests/scheduler_unittest.py @@ -19,6 +19,7 @@ Tests For Scheduler """ +from nova import context from nova import db from nova import flags from nova import service @@ -50,22 +51,24 @@ class SchedulerTestCase(test.TrialTestCase): def test_fallback(self): scheduler = manager.SchedulerManager() self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - rpc.cast('topic.fallback_host', + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.fallback_host', {'method': 'noexist', - 'args': {'context': None, - 'num': 7}}) + 'args': {'num': 7}}) self.mox.ReplayAll() - scheduler.noexist(None, 'topic', num=7) + scheduler.noexist(ctxt, 'topic', num=7) def test_named_method(self): scheduler = manager.SchedulerManager() self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - rpc.cast('topic.named_host', + ctxt = context.get_admin_context() + rpc.cast(ctxt, + 'topic.named_host', {'method': 'named_method', - 'args': {'context': None, - 'num': 7}}) + 'args': {'num': 7}}) self.mox.ReplayAll() - scheduler.named_method(None, 'topic', num=7) + scheduler.named_method(ctxt, 'topic', num=7) class SimpleDriverTestCase(test.TrialTestCase): @@ -78,11 +81,10 @@ class SimpleDriverTestCase(test.TrialTestCase): volume_driver='nova.volume.driver.FakeAOEDriver', scheduler_driver='nova.scheduler.simple.SimpleScheduler') self.scheduler = manager.SchedulerManager() - self.context = None self.manager = auth_manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = None + self.context = context.get_admin_context() def tearDown(self): # pylint: disable-msg=C0103 self.manager.delete_user(self.user) diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py index 06f80e82c..f8da7a871 100644 --- a/nova/tests/service_unittest.py +++ b/nova/tests/service_unittest.py @@ -22,6 +22,7 @@ Unit Tests for remote procedure calls using queue import mox +from nova import context from nova import exception from nova import flags from nova import rpc @@ -45,6 +46,7 @@ class ServiceTestCase(test.BaseTestCase): def setUp(self): # pylint: disable=C0103 super(ServiceTestCase, self).setUp() self.mox.StubOutWithMock(service, 'db') + self.context = context.get_admin_context() def test_create(self): host = 'foo' @@ -88,10 +90,10 
@@ class ServiceTestCase(test.BaseTestCase): 'report_count': 0, 'id': 1} - service.db.service_get_by_args(None, + service.db.service_get_by_args(mox.IgnoreArg(), host, binary).AndRaise(exception.NotFound()) - service.db.service_create(None, + service.db.service_create(mox.IgnoreArg(), service_create).AndReturn(service_ref) self.mox.ReplayAll() @@ -110,10 +112,10 @@ class ServiceTestCase(test.BaseTestCase): 'report_count': 0, 'id': 1} service.db.__getattr__('report_state') - service.db.service_get_by_args(None, + service.db.service_get_by_args(self.context, host, binary).AndReturn(service_ref) - service.db.service_update(None, service_ref['id'], + service.db.service_update(self.context, service_ref['id'], mox.ContainsKeyValue('report_count', 1)) self.mox.ReplayAll() @@ -132,13 +134,13 @@ class ServiceTestCase(test.BaseTestCase): 'id': 1} service.db.__getattr__('report_state') - service.db.service_get_by_args(None, + service.db.service_get_by_args(self.context, host, binary).AndRaise(exception.NotFound()) - service.db.service_create(None, + service.db.service_create(self.context, service_create).AndReturn(service_ref) - service.db.service_get(None, service_ref['id']).AndReturn(service_ref) - service.db.service_update(None, service_ref['id'], + service.db.service_get(self.context, service_ref['id']).AndReturn(service_ref) + service.db.service_update(self.context, service_ref['id'], mox.ContainsKeyValue('report_count', 1)) self.mox.ReplayAll() @@ -154,7 +156,7 @@ class ServiceTestCase(test.BaseTestCase): 'id': 1} service.db.__getattr__('report_state') - service.db.service_get_by_args(None, + service.db.service_get_by_args(self.context, host, binary).AndRaise(Exception()) @@ -173,10 +175,10 @@ class ServiceTestCase(test.BaseTestCase): 'id': 1} service.db.__getattr__('report_state') - service.db.service_get_by_args(None, + service.db.service_get_by_args(self.context, host, binary).AndReturn(service_ref) - service.db.service_update(None, service_ref['id'], + service.db.service_update(self.context, service_ref['id'], mox.ContainsKeyValue('report_count', 1)) self.mox.ReplayAll() diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py index 1d665b502..8e2fa11c1 100644 --- a/nova/tests/volume_unittest.py +++ b/nova/tests/volume_unittest.py @@ -22,6 +22,7 @@ import logging from twisted.internet import defer +from nova import context from nova import exception from nova import db from nova import flags @@ -39,7 +40,7 @@ class VolumeTestCase(test.TrialTestCase): self.compute = utils.import_object(FLAGS.compute_manager) self.flags(connection_type='fake') self.volume = utils.import_object(FLAGS.volume_manager) - self.context = None + self.context = context.get_admin_context() @staticmethod def _create_volume(size='0'): @@ -51,19 +52,19 @@ class VolumeTestCase(test.TrialTestCase): vol['availability_zone'] = FLAGS.storage_availability_zone vol['status'] = "creating" vol['attach_status'] = "detached" - return db.volume_create(None, vol)['id'] + return db.volume_create(context.get_admin_context(), vol)['id'] @defer.inlineCallbacks def test_create_delete_volume(self): """Test volume can be created and deleted""" volume_id = self._create_volume() yield self.volume.create_volume(self.context, volume_id) - self.assertEqual(volume_id, db.volume_get(None, volume_id).id) + self.assertEqual(volume_id, db.volume_get(context.get_admin_context(), volume_id).id) yield self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.NotFound, db.volume_get, - None, + self.context, 
volume_id) @defer.inlineCallbacks @@ -92,7 +93,7 @@ class VolumeTestCase(test.TrialTestCase): self.assertFailure(self.volume.create_volume(self.context, volume_id), db.NoMoreBlades) - db.volume_destroy(None, volume_id) + db.volume_destroy(context.get_admin_context(), volume_id) for volume_id in vols: yield self.volume.delete_volume(self.context, volume_id) @@ -113,12 +114,13 @@ class VolumeTestCase(test.TrialTestCase): volume_id = self._create_volume() yield self.volume.create_volume(self.context, volume_id) if FLAGS.fake_tests: - db.volume_attached(None, volume_id, instance_id, mountpoint) + db.volume_attached(self.context, volume_id, instance_id, mountpoint) else: - yield self.compute.attach_volume(instance_id, + yield self.compute.attach_volume(self.context, + instance_id, volume_id, mountpoint) - vol = db.volume_get(None, volume_id) + vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(vol['status'], "in-use") self.assertEqual(vol['attach_status'], "attached") self.assertEqual(vol['mountpoint'], mountpoint) @@ -128,17 +130,18 @@ class VolumeTestCase(test.TrialTestCase): self.assertFailure(self.volume.delete_volume(self.context, volume_id), exception.Error) if FLAGS.fake_tests: - db.volume_detached(None, volume_id) + db.volume_detached(self.context, volume_id) else: - yield self.compute.detach_volume(instance_id, + yield self.compute.detach_volume(self.context, + instance_id, volume_id) - vol = db.volume_get(None, volume_id) + vol = db.volume_get(self.context, volume_id) self.assertEqual(vol['status'], "available") yield self.volume.delete_volume(self.context, volume_id) self.assertRaises(exception.Error, db.volume_get, - None, + self.context, volume_id) db.instance_destroy(self.context, instance_id) @@ -151,7 +154,7 @@ class VolumeTestCase(test.TrialTestCase): def _check(volume_id): """Make sure blades aren't duplicated""" volume_ids.append(volume_id) - (shelf_id, blade_id) = db.volume_get_shelf_and_blade(None, + (shelf_id, blade_id) = db.volume_get_shelf_and_blade(context.get_admin_context(), volume_id) shelf_blade = '%s.%s' % (shelf_id, blade_id) self.assert_(shelf_blade not in shelf_blades) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d868e083c..4976b4a3b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -28,6 +28,7 @@ import shutil from twisted.internet import defer from twisted.internet import task +from nova import context from nova import db from nova import exception from nova import flags @@ -140,12 +141,13 @@ class LibvirtConnection(object): def _wait_for_shutdown(): try: state = self.get_info(instance['name'])['state'] - db.instance_set_state(None, instance['id'], state) + db.instance_set_state(context.get_admin_context(), + instance['id'], state) if state == power_state.SHUTDOWN: timer.stop() d.callback(None) except Exception: - db.instance_set_state(None, + db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() @@ -190,14 +192,15 @@ class LibvirtConnection(object): def _wait_for_reboot(): try: state = self.get_info(instance['name'])['state'] - db.instance_set_state(None, instance['id'], state) + db.instance_set_state(context.get_admin_context(), + instance['id'], state) if state == power_state.RUNNING: logging.debug('instance %s: rebooted', instance['name']) timer.stop() d.callback(None) except Exception, exn: logging.error('_wait_for_reboot failed: %s', exn) - db.instance_set_state(None, + db.instance_set_state(context.get_admin_context(), 
instance['id'], power_state.SHUTDOWN) timer.stop() @@ -210,7 +213,7 @@ class LibvirtConnection(object): @exception.wrap_exception def spawn(self, instance): xml = self.to_xml(instance) - db.instance_set_state(None, + db.instance_set_state(context.get_admin_context(), instance['id'], power_state.NOSTATE, 'launching') @@ -225,7 +228,8 @@ class LibvirtConnection(object): def _wait_for_boot(): try: state = self.get_info(instance['name'])['state'] - db.instance_set_state(None, instance['id'], state) + db.instance_set_state(context.get_admin_context(), + instance['id'], state) if state == power_state.RUNNING: logging.debug('instance %s: booted', instance['name']) timer.stop() @@ -233,7 +237,7 @@ class LibvirtConnection(object): except: logging.exception('instance %s: failed to boot', instance['name']) - db.instance_set_state(None, + db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) timer.stop() @@ -280,9 +284,11 @@ class LibvirtConnection(object): key = str(inst['key_data']) net = None - network_ref = db.project_get_network(None, project.id) + network_ref = db.project_get_network(context.get_admin_context(), + project.id) if network_ref['injected']: - address = db.instance_get_fixed_address(None, inst['id']) + address = db.instance_get_fixed_address(context.get_admin_context(), + inst['id']) with open(FLAGS.injected_network_template) as f: net = f.read() % {'address': address, 'network': network_ref['network'], @@ -314,7 +320,8 @@ class LibvirtConnection(object): def to_xml(self, instance): # TODO(termie): cache? logging.debug('instance %s: starting toXML method', instance['name']) - network = db.project_get_network(None, instance['project_id']) + network = db.project_get_network(context.get_admin_context(), + instance['project_id']) # FIXME(vish): stick this in db instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']] xml_info = {'type': FLAGS.libvirt_type, -- cgit From 3b3c72fc75b5dff8a26c59b0d4bb7f8ef34a18e7 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 2 Oct 2010 11:39:12 -0700 Subject: add missing to_dict --- nova/rpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc.py b/nova/rpc.py index 26eff9c55..feaa4f042 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -255,7 +255,7 @@ def call(context, topic, msg): msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug("MSG_ID is %s" % (msg_id)) - msg.update({'_context': context}) + msg.update({'_context': context.to_dict()}) class WaitMessage(object): -- cgit From 4bab9061d231b93f5860023e06233fc9c82bb5e3 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Sat, 2 Oct 2010 12:29:58 -0700 Subject: pack and unpack context --- nova/rpc.py | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/nova/rpc.py b/nova/rpc.py index feaa4f042..d14c98d3a 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -151,8 +151,8 @@ class AdapterConsumer(TopicConsumer): """ LOG.debug('received %s' % (message_data)) msg_id = message_data.pop('_msg_id', None) - dict_context = message_data.pop('_context') - ctxt = context.RequestContext.from_dict(dict_context) + + ctxt = _unpack_context(message_data) method = message_data.get('method') args = message_data.get('args', {}) @@ -249,13 +249,35 @@ class RemoteError(exception.Error): traceback)) +def _unpack_context(msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] 
= value
+    LOG.debug('unpacked context: %s', context_dict)
+    return context.RequestContext.from_dict(context_dict)
+
+def _pack_context(msg, context):
+    """Pack context into msg.
+
+    Values for message keys need to be less than 255 chars, so we pull
+    context out into a bunch of separate keys. If we want to support
+    more arguments in rabbit messages, we may want to do the same
+    for args at some point.
+    """
+    context = dict([('_context_%s' % key, value)
+                    for (key, value) in context.to_dict().iteritems()])
+    msg.update(context)
+
 def call(context, topic, msg):
     """Sends a message on a topic and wait for a response"""
     LOG.debug("Making asynchronous call...")
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
     LOG.debug("MSG_ID is %s" % (msg_id))
-    msg.update({'_context': context.to_dict()})
+    _pack_context(msg, context)
 
     class WaitMessage(object):
@@ -291,7 +313,7 @@ def call_twisted(context, topic, msg):
     msg_id = uuid.uuid4().hex
     msg.update({'_msg_id': msg_id})
     LOG.debug("MSG_ID is %s" % (msg_id))
-    msg.update({'_context': context.to_dict()})
+    _pack_context(msg, context)
 
     conn = Connection.instance()
     d = defer.Deferred()
@@ -320,7 +342,7 @@ def cast(context, topic, msg):
     """Sends a message on a topic without waiting for a response"""
     LOG.debug("Making asynchronous cast...")
-    msg.update({'_context': context.to_dict()})
+    _pack_context(msg, context)
     conn = Connection.instance()
     publisher = TopicPublisher(connection=conn, topic=topic)
     publisher.send(msg)
-- cgit
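The two helpers above are easiest to see end to end on a plain dictionary, which is all an AMQP message is by the time it reaches the consumer. Below is a round-trip sketch for illustration only, not code from the patch; it assumes the context's to_dict() and from_dict() are inverses of each other, which is what the call/cast paths rely on:

    from nova import context as nova_context
    from nova import rpc

    ctxt = nova_context.get_admin_context()
    msg = {'method': 'echo', 'args': {'value': 42}}
    rpc._pack_context(msg, ctxt)
    # msg now carries one flat '_context_<field>' entry per context field,
    # which keeps each rabbit message value well under the 255 char limit.
    unpacked = rpc._unpack_context(msg)  # pops those keys, rebuilds a context
    assert unpacked.to_dict() == ctxt.to_dict()

From 79a2c349ca5772a69b6f7f28a768e711d6db1524 Mon Sep 17 00:00:00 2001
From: Michael Gundlach
Date: Wed, 13 Oct 2010 16:36:05 -0400
Subject: Fix several problems keeping AuthMiddleware from functioning in the OpenStack API.

---
 nova/api/openstack/auth.py            | 42 +++++++++++++++++------------------
 nova/db/api.py                        |  4 ++--
 nova/db/sqlalchemy/api.py             |  5 +++--
 nova/tests/api/openstack/fakes.py     | 13 +++++++----
 nova/tests/api/openstack/test_auth.py | 10 +++++----
 5 files changed, 41 insertions(+), 33 deletions(-)

diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 4c909293e..7aba55728 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -24,9 +24,9 @@ class BasicApiAuthManager(object):
     def __init__(self, host=None, db_driver=None):
         if not host:
             host = FLAGS.host
-        self.host = host 
+        self.host = host
         if not db_driver:
-            db_driver = FLAGS.db_driver 
+            db_driver = FLAGS.db_driver
         self.db = utils.import_object(db_driver)
         self.auth = auth.manager.AuthManager()
         self.context = Context()
@@ -40,20 +40,19 @@ class BasicApiAuthManager(object):
             return faults.Fault(webob.exc.HTTPUnauthorized())
 
         try:
-            username, key = req.headers['X-Auth-User'], \
-                            req.headers['X-Auth-Key']
+            username = req.headers['X-Auth-User']
+            key = req.headers['X-Auth-Key']
         except KeyError:
             return faults.Fault(webob.exc.HTTPUnauthorized())
 
-        username, key = req.headers['X-Auth-User'], req.headers['X-Auth-Key']
         token, user = self._authorize_user(username, key)
         if user and token:
             res = webob.Response()
-            res.headers['X-Auth-Token'] = token['token_hash']
+            res.headers['X-Auth-Token'] = token.token_hash
             res.headers['X-Server-Management-Url'] = \
-                token['server_management_url']
-            res.headers['X-Storage-Url'] = token['storage_url']
-            res.headers['X-CDN-Management-Url'] = token['cdn_management_url']
+                token.server_management_url
+            res.headers['X-Storage-Url'] = token.storage_url
+            res.headers['X-CDN-Management-Url'] = token.cdn_management_url
             res.content_type = 'text/plain'
             res.status = '204'
             return 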
res @@ -65,34 +64,35 @@ class BasicApiAuthManager(object): If the token has expired, returns None If the token is not found, returns None - Otherwise returns the token + Otherwise returns dict(id=(the authorized user's id)) This method will also remove the token if the timestamp is older than 2 days ago. """ token = self.db.auth_get_token(self.context, token_hash) if token: - delta = datetime.datetime.now() - token['created_at'] + delta = datetime.datetime.now() - token.created_at if delta.days >= 2: self.db.auth_destroy_token(self.context, token) else: - user = self.auth.get_user(token['user_id']) - return { 'id':user['uid'] } + #TODO(gundlach): Why not just return dict(id=token.user_id)? + user = self.auth.get_user(token.user_id) + return {'id': user.id} return None def _authorize_user(self, username, key): """ Generates a new token and assigns it to a user """ user = self.auth.get_user_from_access_key(key) - if user and user['name'] == username: + if user and user.name == username: token_hash = hashlib.sha1('%s%s%f' % (username, key, time.time())).hexdigest() - token = {} - token['token_hash'] = token_hash - token['cdn_management_url'] = '' - token['server_management_url'] = self._get_server_mgmt_url() - token['storage_url'] = '' - token['user_id'] = user['uid'] - self.db.auth_create_token(self.context, token) + token_dict = {} + token_dict['token_hash'] = token_hash + token_dict['cdn_management_url'] = '' + token_dict['server_management_url'] = self._get_server_mgmt_url() + token_dict['storage_url'] = '' + token_dict['user_id'] = user.id + token = self.db.auth_create_token(self.context, token_dict) return token, user return None, None diff --git a/nova/db/api.py b/nova/db/api.py index 2f0879c5a..11815991e 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -258,7 +258,7 @@ def instance_get_all(context): def instance_get_all_by_user(context, user_id): """Get all instances.""" - return IMPL.instance_get_all(context, user_id) + return IMPL.instance_get_all_by_user(context, user_id) def instance_get_all_by_project(context, project_id): """Get all instance belonging to a project.""" @@ -473,7 +473,7 @@ def auth_get_token(context, token_hash): def auth_create_token(context, token): """Creates a new token""" - return IMPL.auth_create_token(context, token_hash, token) + return IMPL.auth_create_token(context, token) ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6f1ea7c23..1043f4bfb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1024,7 +1024,8 @@ def auth_destroy_token(_context, token): def auth_get_token(_context, token_hash): session = get_session() tk = session.query(models.AuthToken - ).filter_by(token_hash=token_hash) + ).filter_by(token_hash=token_hash + ).first() if not tk: raise exception.NotFound('Token %s does not exist' % token_hash) return tk @@ -1309,7 +1310,7 @@ def user_get_by_access_key(context, access_key, session=None): ).first() if not result: - raise exception.NotFound('No user for id %s' % id) + raise exception.NotFound('No user for access key %s' % access_key) return result diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 34bc1f2a9..6fca19364 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -161,6 +161,10 @@ def stub_out_glance(stubs): stubs.Set(nova.image.service.GlanceImageService, 'delete_all', fake_parallax_client.fake_delete_all) +class FakeToken(object): + def __init__(self, **kwargs): + for k,v in 
kwargs.iteritems():
+            setattr(self, k, v)
 
 class FakeAuthDatabase(object):
     data = {}
@@ -171,12 +175,13 @@ class FakeAuthDatabase(object):
 
     @staticmethod
     def auth_create_token(context, token):
-        token['created_at'] = datetime.datetime.now()
-        FakeAuthDatabase.data[token['token_hash']] = token
+        fakeToken = FakeToken(created_at=datetime.datetime.now(), **token)
+        FakeAuthDatabase.data[fakeToken.token_hash] = fakeToken
+        return fakeToken
 
     @staticmethod
     def auth_destroy_token(context, token):
-        if FakeAuthDatabase.data.has_key(token['token_hash']):
+        if token.token_hash in FakeAuthDatabase.data:
             del FakeAuthDatabase.data['token_hash']
 
@@ -188,7 +193,7 @@ class FakeAuthManager(object):
 
     def get_user(self, uid):
         for k, v in FakeAuthManager.auth_data.iteritems():
-            if v['uid'] == uid:
+            if v.id == uid:
                 return v
         return None
 
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index d2ba80243..bbfb0fcea 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -7,6 +7,7 @@ import webob.dec
 
 import nova.api
 import nova.api.openstack.auth
+import nova.auth.manager
 from nova import auth
 from nova.tests.api.openstack import fakes
 
@@ -26,7 +27,7 @@ class Test(unittest.TestCase):
 
     def test_authorize_user(self):
         f = fakes.FakeAuthManager()
-        f.add_user('derp', { 'uid': 1, 'name':'herp' } )
+        f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
 
         req = webob.Request.blank('/v1.0/')
         req.headers['X-Auth-User'] = 'herp'
@@ -40,7 +41,7 @@
 
     def test_authorize_token(self):
         f = fakes.FakeAuthManager()
-        f.add_user('derp', { 'uid': 1, 'name':'herp' } )
+        f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
 
         req = webob.Request.blank('/v1.0/')
         req.headers['X-Auth-User'] = 'herp'
@@ -71,8 +72,9 @@
             self.destroy_called = True
 
         def bad_token(meh, context, token_hash):
-            return { 'token_hash':token_hash,
-                     'created_at':datetime.datetime(1990, 1, 1) }
+            return fakes.FakeToken(
+                token_hash=token_hash,
+                created_at=datetime.datetime(1990, 1, 1))
 
         self.stubs.Set(fakes.FakeAuthDatabase, 'auth_destroy_token',
                        destroy_token_mock)
-- cgit
From 03962c39bf4ecbe424d3960f7fbbd19c37911757 Mon Sep 17 00:00:00 2001
From: Michael Gundlach
Date: Wed, 13 Oct 2010 21:55:01 -0400
Subject: Address cerberus's comment: rename fakeToken to fake_token

---
 nova/tests/api/openstack/fakes.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 6fca19364..71da2fd21 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -175,9 +175,9 @@ class FakeAuthDatabase(object):
 
     @staticmethod
     def auth_create_token(context, token):
-        fakeToken = FakeToken(created_at=datetime.datetime.now(), **token)
-        FakeAuthDatabase.data[fakeToken.token_hash] = fakeToken
-        return fakeToken
+        fake_token = FakeToken(created_at=datetime.datetime.now(), **token)
+        FakeAuthDatabase.data[fake_token.token_hash] = fake_token
+        return fake_token
 
     @staticmethod
     def auth_destroy_token(context, token):
-- cgit
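The two fakes.py patches above serve one change of shape: the auth middleware now deals in objects with attributes, like the SQLAlchemy models the real database layer returns, instead of plain dicts. A minimal sketch of the pattern, mirroring (not copied from) the FakeToken class added above:

    import datetime

    class FakeToken(object):
        """A bag of attributes standing in for a models.AuthToken row."""
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                setattr(self, key, value)

    token = FakeToken(token_hash='abc123', user_id=1,
                      created_at=datetime.datetime.now())
    assert token.token_hash == 'abc123'
    # token['token_hash'] would raise a TypeError here, which is exactly
    # the dict-style access these patches remove from the middleware.

From 40ed78a3a4bd188e60cee1c886d4820f4a578d0c Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Wed, 13 Oct 2010 22:05:21 -0700
Subject: elevate in proper places, fix a couple of typos

---
 nova/api/ec2/cloud.py     | 19 ++++++++++++++-----
 nova/compute/manager.py   |  6 ++++++
 nova/context.py           |  9 +++++----
 nova/db/sqlalchemy/api.py | 25 +++++++++++++------------
 nova/network/manager.py   | 10 ++++++----
 nova/quota.py             |  9 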
++++++--- nova/scheduler/manager.py | 5 +++-- nova/volume/manager.py | 3 +++ 8 files changed, 56 insertions(+), 30 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 02508344c..2226a0a5e 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -52,6 +52,11 @@ class QuotaError(exception.ApiError): """Quota Exceeeded""" pass +../deploy/nova/api/ec2/cloud.py: db.security_group_get_by_name(context.admin(), +../deploy/nova/api/ec2/cloud.py: instance_ref = db.volume_get_instance(context.admin(), volume_ref['id']) +../deploy/nova/api/ec2/cloud.py: db.instance_add_security_group(context.admin(), inst_id, +../deploy/nova/api/ec2/cloud.py: rpc.cast(context.admin(), +../deploy/nova/api/ec2/cloud.py: self.network_manager.deallocate_fixed_ip(context.admin(), def _gen_key(context, user_id, key_name): """Generate a key @@ -310,7 +315,7 @@ class CloudController(object): source_security_group_owner_id) source_security_group = \ - db.security_group_get_by_name(context, + db.security_group_get_by_name(context.elevated(), source_project_id, source_security_group_name) values['group_id'] = source_security_group['id'] @@ -556,7 +561,8 @@ class CloudController(object): def detach_volume(self, context, volume_id, **kwargs): volume_ref = db.volume_get_by_ec2_id(context, volume_id) - instance_ref = db.volume_get_instance(context, volume_ref['id']) + instance_ref = db.volume_get_instance(context.elevated(), + volume_ref['id']) if not instance_ref: raise exception.ApiError("Volume isn't attached to anything!") # TODO(vish): abstract status checking? @@ -842,13 +848,15 @@ class CloudController(object): base_options['memory_mb'] = type_data['memory_mb'] base_options['vcpus'] = type_data['vcpus'] base_options['local_gb'] = type_data['local_gb'] + elevated = context.elevated() for num in range(num_instances): instance_ref = db.instance_create(context, base_options) inst_id = instance_ref['id'] for security_group_id in security_groups: - db.instance_add_security_group(context, inst_id, + db.instance_add_security_group(elevated, + inst_id, security_group_id) inst = {} @@ -866,7 +874,7 @@ class CloudController(object): inst_id, vpn) network_topic = self._get_network_topic(context) - rpc.call(context, + rpc.cast(elevated, network_topic, {"method": "setup_fixed_ip", "args": {"address": address}}) @@ -924,7 +932,8 @@ class CloudController(object): # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here. 
- self.network_manager.deallocate_fixed_ip(context, address) + self.network_manager.deallocate_fixed_ip(context.elevated(), + address) host = instance_ref['host'] if host: diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c602d013d..694c4ff56 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -71,6 +71,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): """Launch a new instance with specified options.""" + context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error("Instance has already been created") @@ -106,6 +107,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" + context = context.elevated() logging.debug("instance %s: terminating", instance_id) instance_ref = self.db.instance_get(context, instance_id) @@ -127,6 +129,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" + context = context.elevated() self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) @@ -149,6 +152,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" + context = context.elevated() logging.debug("instance %s: getting console output", instance_id) instance_ref = self.db.instance_get(context, instance_id) @@ -158,6 +162,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" + context = context.elevated() logging.debug("instance %s: attaching volume %s to %s", instance_id, volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) @@ -173,6 +178,7 @@ class ComputeManager(manager.Manager): @exception.wrap_exception def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" + context = context.elevated() logging.debug("instance %s: detaching volume %s", instance_id, volume_id) diff --git a/nova/context.py b/nova/context.py index 3f9de519d..977866d59 100644 --- a/nova/context.py +++ b/nova/context.py @@ -41,9 +41,10 @@ class RequestContext(object): self._project = None self.project_id = project if is_admin is None: - if not user: - user = self.user - self.is_admin = user.is_admin() + if self.user_id and self.user: + self.is_admin = self.user.is_admin() + else: + self.is_admin = False else: self.is_admin = is_admin self.read_deleted = read_deleted @@ -91,7 +92,7 @@ class RequestContext(object): def from_dict(cls, values): return cls(**values) - def admin(self, read_deleted=False): + def elevated(self, read_deleted=False): """Return a version of this context with admin flag set""" return RequestContext(self.user_id, self.project_id, diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d0adcd9a4..a4b2eaea5 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -33,7 +33,6 @@ from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import exists from sqlalchemy.sql import func -from sqlalchemy.orm.exc import NoResultFound FLAGS = flags.FLAGS @@ -52,7 +51,9 @@ def 
is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False - if not context.user or not context.project: + if context.is_admin: + return False + if not context.user_id or not context.project_id: return False return True @@ -64,7 +65,7 @@ def authorize_project_context(context, project_id): if is_user_context(context): if not context.project: raise exception.NotAuthorized() - elif context.project.id != project_id: + elif context.project_id != project_id: raise exception.NotAuthorized() @@ -75,7 +76,7 @@ def authorize_user_context(context, user_id): if is_user_context(context): if not context.user: raise exception.NotAuthorized() - elif context.user.id != user_id: + elif context.user_id != user_id: raise exception.NotAuthorized() @@ -540,7 +541,7 @@ def instance_create(context, values): with session.begin(): while instance_ref.internal_id == None: internal_id = utils.generate_uid(instance_ref.__prefix__) - if not instance_internal_id_exists(context, internal_id, + if not instance_internal_id_exists(context, internal_id, session=session): instance_ref.internal_id = internal_id instance_ref.save(session=session) @@ -582,7 +583,7 @@ def instance_get(context, instance_id, session=None): elif is_user_context(context): result = session.query(models.Instance ).options(joinedload('security_groups') - ).filter_by(project_id=context.project.id + ).filter_by(project_id=context.project_id ).filter_by(id=instance_id ).filter_by(deleted=False ).first() @@ -641,7 +642,7 @@ def instance_get_all_by_reservation(context, reservation_id): return session.query(models.Instance ).options(joinedload_all('fixed_ip.floating_ips') ).options(joinedload('security_groups') - ).filter_by(project_id=context.project.id + ).filter_by(project_id=context.project_id ).filter_by(reservation_id=reservation_id ).filter_by(deleted=False ).all() @@ -660,7 +661,7 @@ def instance_get_by_internal_id(context, internal_id): elif is_user_context(context): result = session.query(models.Instance ).options(joinedload('security_groups') - ).filter_by(project_id=context.project.id + ).filter_by(project_id=context.project_id ).filter_by(internal_id=internal_id ).filter_by(deleted=False ).first() @@ -898,7 +899,7 @@ def network_get(context, network_id, session=None): ).first() elif is_user_context(context): result = session.query(models.Network - ).filter_by(project_id=context.project.id + ).filter_by(project_id=context.project_id ).filter_by(id=network_id ).filter_by(deleted=False ).first() @@ -1199,7 +1200,7 @@ def volume_get(context, volume_id, session=None): ).first() elif is_user_context(context): result = session.query(models.Volume - ).filter_by(project_id=context.project.id + ).filter_by(project_id=context.project_id ).filter_by(id=volume_id ).filter_by(deleted=False ).first() @@ -1239,7 +1240,7 @@ def volume_get_by_ec2_id(context, ec2_id): ).first() elif is_user_context(context): result = session.query(models.Volume - ).filter_by(project_id=context.project.id + ).filter_by(project_id=context.project_id ).filter_by(ec2_id=ec2_id ).filter_by(deleted=False ).first() @@ -1632,7 +1633,7 @@ def user_remove_project_role(context, user_id, project_id, role): with session.begin(): session.execute('delete from user_project_role_association where ' + \ 'user_id=:user_id and project_id=:project_id and ' + \ - 'role=:role', { 'user_id' : user_id, + 'role=:role', { 'user_id' : user_id, 'project_id' : project_id, 'role' : role }) diff --git a/nova/network/manager.py b/nova/network/manager.py index 
391a4fc7f..cb5759a2b 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -229,7 +229,7 @@ class FlatManager(NetworkManager): # network_get_by_compute_host network_ref = self.db.network_get_by_bridge(context, FLAGS.flat_network_bridge) - address = self.db.fixed_ip_associate_pool(context.admin(), + address = self.db.fixed_ip_associate_pool(context.elevated(), network_ref['id'], instance_id) self.db.fixed_ip_update(context, address, {'allocated': True}) @@ -338,12 +338,13 @@ class VlanManager(NetworkManager): # TODO(vish): This should probably be getting project_id from # the instance, but it is another trip to the db. # Perhaps this method should take an instance_ref. - network_ref = self.db.project_get_network(context, context.project.id) + network_ref = self.db.project_get_network(context.elevated(), + context.project_id) if kwargs.get('vpn', None): address = network_ref['vpn_private_address'] self.db.fixed_ip_associate(None, address, instance_id) else: - address = self.db.fixed_ip_associate_pool(context.admin(), + address = self.db.fixed_ip_associate_pool(context.elevated(), network_ref['id'], instance_id) self.db.fixed_ip_update(context, address, {'allocated': True}) @@ -402,7 +403,8 @@ class VlanManager(NetworkManager): def get_network(self, context): """Get the network for the current context""" - return self.db.project_get_network(None, context.project.id) + return self.db.project_get_network(context.elevated(), + context.project_id) def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a network""" diff --git a/nova/quota.py b/nova/quota.py index edbb83111..045051207 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -54,7 +54,8 @@ def get_quota(context, project_id): def allowed_instances(context, num_instances, instance_type): """Check quota and return min(num_instances, allowed_instances)""" - project_id = context.project.id + project_id = context.project_id + context = context.elevated() used_instances, used_cores = db.instance_data_get_for_project(context, project_id) quota = get_quota(context, project_id) @@ -69,7 +70,8 @@ def allowed_instances(context, num_instances, instance_type): def allowed_volumes(context, num_volumes, size): """Check quota and return min(num_volumes, allowed_volumes)""" - project_id = context.project.id + project_id = context.project_id + context = context.elevated() used_volumes, used_gigabytes = db.volume_data_get_for_project(context, project_id) quota = get_quota(context, project_id) @@ -84,7 +86,8 @@ def allowed_volumes(context, num_volumes, size): def allowed_floating_ips(context, num_floating_ips): """Check quota and return min(num_floating_ips, allowed_floating_ips)""" - project_id = context.project.id + project_id = context.project_id + context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) quota = get_quota(context, project_id) allowed_floating_ips = quota['floating_ips'] - used_floating_ips diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 6a933fb37..b3b2b4dce 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -54,10 +54,11 @@ class SchedulerManager(manager.Manager): Falls back to schedule(context, topic) if method doesn't exist. 
""" driver_method = 'schedule_%s' % method + elevated = context.elevated() try: - host = getattr(self.driver, driver_method)(context, *args, **kwargs) + host = getattr(self.driver, driver_method)(elevated, *args, **kwargs) except AttributeError: - host = self.driver.schedule(context, topic, *args, **kwargs) + host = self.driver.schedule(elevated, topic, *args, **kwargs) rpc.cast(context, db.queue_get_for(context, topic, host), diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 8508f27b2..89290cc16 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -67,6 +67,7 @@ class AOEManager(manager.Manager): @defer.inlineCallbacks def create_volume(self, context, volume_id): """Creates and exports the volume""" + context = context.elevated() logging.info("volume %s: creating", volume_id) volume_ref = self.db.volume_get(context, volume_id) @@ -103,6 +104,7 @@ class AOEManager(manager.Manager): @defer.inlineCallbacks def delete_volume(self, context, volume_id): """Deletes and unexports volume""" + context = context.elevated() logging.debug("Deleting volume with id of: %s", volume_id) volume_ref = self.db.volume_get(context, volume_id) if volume_ref['attach_status'] == "attached": @@ -124,6 +126,7 @@ class AOEManager(manager.Manager): Returns path to device. """ + context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) yield self.driver.discover_volume(volume_ref['ec2_id']) shelf_id, blade_id = self.db.volume_get_shelf_and_blade(context, -- cgit From c40996e8b036c96079d99831a239be8df57d6ce2 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 13 Oct 2010 22:07:43 -0700 Subject: use context.project_id because it is more efficient --- nova/api/ec2/cloud.py | 38 +++++++++++++++++++------------------- nova/objectstore/bucket.py | 4 ++-- nova/objectstore/image.py | 14 +++++++------- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 2226a0a5e..c4cfdc3ba 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -271,7 +271,7 @@ class CloudController(object): groups = db.security_group_get_all(context) else: groups = db.security_group_get_by_project(context, - context.project.id) + context.project_id) groups = [self._format_security_group(context, g) for g in groups] if not group_name is None: groups = [g for g in groups if g.name in group_name] @@ -371,7 +371,7 @@ class CloudController(object): def revoke_security_group_ingress(self, context, group_name, **kwargs): self._ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, - context.project.id, + context.project_id, group_name) criteria = self._authorize_revoke_rule_args_to_dict(context, **kwargs) @@ -396,7 +396,7 @@ class CloudController(object): def authorize_security_group_ingress(self, context, group_name, **kwargs): self._ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, - context.project.id, + context.project_id, group_name) values = self._authorize_revoke_rule_args_to_dict(context, **kwargs) @@ -426,18 +426,18 @@ class CloudController(object): else: source_project_id = source_parts[0] else: - source_project_id = context.project.id + source_project_id = context.project_id return source_project_id def create_security_group(self, context, group_name, group_description): self._ensure_default_security_group(context) - if db.security_group_exists(context, context.project.id, group_name): + if db.security_group_exists(context, 
context.project_id, group_name): raise exception.ApiError('group %s already exists' % group_name) group = {'user_id' : context.user.id, - 'project_id': context.project.id, + 'project_id': context.project_id, 'name': group_name, 'description': group_description} group_ref = db.security_group_create(context, group) @@ -448,7 +448,7 @@ class CloudController(object): def delete_security_group(self, context, group_name, **kwargs): security_group = db.security_group_get_by_name(context, - context.project.id, + context.project_id, group_name) db.security_group_destroy(context, security_group.id) return True @@ -474,7 +474,7 @@ class CloudController(object): if context.user.is_admin(): volumes = db.volume_get_all(context) else: - volumes = db.volume_get_all_by_project(context, context.project.id) + volumes = db.volume_get_all_by_project(context, context.project_id) volumes = [self._format_volume(context, v) for v in volumes] @@ -512,14 +512,14 @@ class CloudController(object): # check quota if quota.allowed_volumes(context, 1, size) < 1: logging.warn("Quota exceeeded for %s, tried to create %sG volume", - context.project.id, size) + context.project_id, size) raise QuotaError("Volume quota exceeded. You cannot " "create a volume of size %s" % size) vol = {} vol['size'] = size vol['user_id'] = context.user.id - vol['project_id'] = context.project.id + vol['project_id'] = context.project_id vol['availability_zone'] = FLAGS.storage_availability_zone vol['status'] = "creating" vol['attach_status'] = "detached" @@ -626,7 +626,7 @@ class CloudController(object): instances = db.instance_get_all(context) else: instances = db.instance_get_all_by_project(context, - context.project.id) + context.project_id) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: @@ -681,7 +681,7 @@ class CloudController(object): iterator = db.floating_ip_get_all(context) else: iterator = db.floating_ip_get_all_by_project(context, - context.project.id) + context.project_id) for floating_ip_ref in iterator: address = floating_ip_ref['address'] instance_id = None @@ -702,14 +702,14 @@ class CloudController(object): # check quota if quota.allowed_floating_ips(context, 1) < 1: logging.warn("Quota exceeeded for %s, tried to allocate address", - context.project.id) + context.project_id) raise QuotaError("Address quota exceeded. You cannot " "allocate any more addresses") network_topic = self._get_network_topic(context) public_ip = rpc.call(context, network_topic, {"method": "allocate_floating_ip", - "args": {"project_id": context.project.id}}) + "args": {"project_id": context.project_id}}) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): @@ -759,13 +759,13 @@ class CloudController(object): def _ensure_default_security_group(self, context): try: db.security_group_get_by_name(context, - context.project.id, + context.project_id, 'default') except exception.NotFound: values = { 'name' : 'default', 'description' : 'default', 'user_id' : context.user.id, - 'project_id' : context.project.id } + 'project_id' : context.project_id } group = db.security_group_create(context, values) def run_instances(self, context, **kwargs): @@ -781,7 +781,7 @@ class CloudController(object): instance_type) if num_instances < min_instances: logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project.id, min_instances) + context.project_id, min_instances) raise QuotaError("Instance quota exceeded. 
You can only " "run %s more instances of this type." % num_instances, "InstanceLimitExceeded") @@ -823,7 +823,7 @@ class CloudController(object): self._ensure_default_security_group(context) for security_group_name in security_group_arg: group = db.security_group_get_by_name(context, - context.project.id, + context.project_id, security_group_name) security_groups.append(group['id']) @@ -837,7 +837,7 @@ class CloudController(object): base_options['key_data'] = key_data base_options['key_name'] = kwargs.get('key_name', None) base_options['user_id'] = context.user.id - base_options['project_id'] = context.project.id + base_options['project_id'] = context.project_id base_options['user_data'] = kwargs.get('user_data', '') base_options['display_name'] = kwargs.get('display_name') diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py index c2b412dd7..cfe5b14d8 100644 --- a/nova/objectstore/bucket.py +++ b/nova/objectstore/bucket.py @@ -83,7 +83,7 @@ class Bucket(object): os.makedirs(path) with open(path+'.json', 'w') as f: - json.dump({'ownerId': context.project.id}, f) + json.dump({'ownerId': context.project_id}, f) @property def metadata(self): @@ -106,7 +106,7 @@ class Bucket(object): def is_authorized(self, context): try: - return context.user.is_admin() or self.owner_id == context.project.id + return context.user.is_admin() or self.owner_id == context.project_id except Exception, e: return False diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py index c01b041bb..413b269b7 100644 --- a/nova/objectstore/image.py +++ b/nova/objectstore/image.py @@ -72,7 +72,7 @@ class Image(object): try: return (self.metadata['isPublic'] and readonly) or \ context.user.is_admin() or \ - self.metadata['imageOwnerId'] == context.project.id + self.metadata['imageOwnerId'] == context.project_id except: return False @@ -133,11 +133,11 @@ class Image(object): @type public: bool @param public: determine if this is a public image or private - + @rtype: str @return: a string with the image id """ - + image_type = 'machine' image_id = utils.generate_uid('ami') @@ -162,7 +162,7 @@ class Image(object): 'imageType': image_type, 'state': 'available' } - + if type(kernel) is str and len(kernel) > 0: info['kernelId'] = kernel @@ -203,7 +203,7 @@ class Image(object): info = { 'imageId': image_id, 'imageLocation': image_location, - 'imageOwnerId': context.project.id, + 'imageOwnerId': context.project_id, 'isPublic': False, # FIXME: grab public from manifest 'architecture': 'x86_64', # FIXME: grab architecture from manifest 'imageType' : image_type @@ -249,13 +249,13 @@ class Image(object): @staticmethod def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): key, err = utils.execute( - 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, + 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, process_input=encrypted_key, check_exit_code=False) if err: raise exception.Error("Failed to decrypt private key: %s" % err) iv, err = utils.execute( - 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, + 'openssl rsautl -decrypt -inkey %s' % cloud_private_key, process_input=encrypted_iv, check_exit_code=False) if err: -- cgit From 68e716cbb2901e8f54291aacdcf4f2dc1d0a47ff Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 13 Oct 2010 22:09:29 -0700 Subject: remove accidental paste --- nova/api/ec2/cloud.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c4cfdc3ba..13c038f17 100644 
--- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -52,11 +52,6 @@ class QuotaError(exception.ApiError): """Quota Exceeeded""" pass -../deploy/nova/api/ec2/cloud.py: db.security_group_get_by_name(context.admin(), -../deploy/nova/api/ec2/cloud.py: instance_ref = db.volume_get_instance(context.admin(), volume_ref['id']) -../deploy/nova/api/ec2/cloud.py: db.instance_add_security_group(context.admin(), inst_id, -../deploy/nova/api/ec2/cloud.py: rpc.cast(context.admin(), -../deploy/nova/api/ec2/cloud.py: self.network_manager.deallocate_fixed_ip(context.admin(), def _gen_key(context, user_id, key_name): """Generate a key -- cgit From 14e956b2319821ef9d6f595347e4057413f2c0ee Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 13 Oct 2010 22:18:01 -0700 Subject: cleaned up most of the issues --- nova/api/openstack/servers.py | 2 +- nova/auth/dbdriver.py | 67 +++++++++++++++++++++++++------------------ nova/test.py | 13 +++++---- nova/tests/virt_unittest.py | 2 +- 4 files changed, 49 insertions(+), 35 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 26b72afac..8c41944d2 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -26,7 +26,7 @@ from nova import utils from nova import wsgi from nova import context from nova.api import cloud -from nova.api.rackspace import faults +from nova.api.openstack import faults from nova.compute import instance_types from nova.compute import power_state import nova.api.openstack diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index 09d15018b..648d6e828 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -23,6 +23,7 @@ Auth driver using the DB as its backend. import logging import sys +from nova import context from nova import exception from nova import db @@ -46,26 +47,26 @@ class DbDriver(object): def get_user(self, uid): """Retrieve user by id""" - return self._db_user_to_auth_user(db.user_get({}, uid)) + return self._db_user_to_auth_user(db.user_get(context.get_admin_context(), uid)) def get_user_from_access_key(self, access): """Retrieve user by access key""" - return self._db_user_to_auth_user(db.user_get_by_access_key({}, access)) + return self._db_user_to_auth_user(db.user_get_by_access_key(context.get_admin_context(), access)) def get_project(self, pid): """Retrieve project by id""" - return self._db_project_to_auth_projectuser(db.project_get({}, pid)) + return self._db_project_to_auth_projectuser(db.project_get(context.get_admin_context(), pid)) def get_users(self): """Retrieve list of users""" - return [self._db_user_to_auth_user(user) for user in db.user_get_all({})] + return [self._db_user_to_auth_user(user) for user in db.user_get_all(context.get_admin_context())] def get_projects(self, uid=None): """Retrieve list of projects""" if uid: - result = db.project_get_by_user({}, uid) + result = db.project_get_by_user(context.get_admin_context(), uid) else: - result = db.project_get_all({}) + result = db.project_get_all(context.get_admin_context()) return [self._db_project_to_auth_projectuser(proj) for proj in result] def create_user(self, name, access_key, secret_key, is_admin): @@ -76,7 +77,7 @@ class DbDriver(object): 'is_admin' : is_admin } try: - user_ref = db.user_create({}, values) + user_ref = db.user_create(context.get_admin_context(), values) return self._db_user_to_auth_user(user_ref) except exception.Duplicate, e: raise exception.Duplicate('User %s already exists' % name) @@ -98,7 +99,7 @@ class DbDriver(object): def 
create_project(self, name, manager_uid, description=None, member_uids=None): """Create a project""" - manager = db.user_get({}, manager_uid) + manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: raise exception.NotFound("Project can't be created because " "manager %s doesn't exist" % manager_uid) @@ -113,7 +114,7 @@ class DbDriver(object): members = set([manager]) if member_uids != None: for member_uid in member_uids: - member = db.user_get({}, member_uid) + member = db.user_get(context.get_admin_context(), member_uid) if not member: raise exception.NotFound("Project can't be created " "because user %s doesn't exist" @@ -126,17 +127,20 @@ class DbDriver(object): 'description': description } try: - project = db.project_create({}, values) + project = db.project_create(context.get_admin_context(), values) except exception.Duplicate: raise exception.Duplicate("Project can't be created because " "project %s already exists" % name) for member in members: - db.project_add_member({}, project['id'], member['id']) + db.project_add_member(context.get_admin_context(), + project['id'], + member['id']) # This looks silly, but ensures that the members element has been # correctly populated - project_ref = db.project_get({}, project['id']) + project_ref = db.project_get(context.get_admin_context(), + project['id']) return self._db_project_to_auth_projectuser(project_ref) def modify_project(self, project_id, manager_uid=None, description=None): @@ -145,7 +149,7 @@ class DbDriver(object): return values = {} if manager_uid: - manager = db.user_get({}, manager_uid) + manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: raise exception.NotFound("Project can't be modified because " "manager %s doesn't exist" % @@ -154,17 +158,21 @@ class DbDriver(object): if description: values['description'] = description - db.project_update({}, project_id, values) + db.project_update(context.get_admin_context(), project_id, values) def add_to_project(self, uid, project_id): """Add user to project""" user, project = self._validate_user_and_project(uid, project_id) - db.project_add_member({}, project['id'], user['id']) + db.project_add_member(context.get_admin_context(), + project['id'], + user['id']) def remove_from_project(self, uid, project_id): """Remove user from project""" user, project = self._validate_user_and_project(uid, project_id) - db.project_remove_member({}, project['id'], user['id']) + db.project_remove_member(context.get_admin_context(), + project['id'], + user['id']) def is_in_project(self, uid, project_id): """Check if user is in project""" @@ -183,34 +191,37 @@ class DbDriver(object): def add_role(self, uid, role, project_id=None): """Add role for user (or user and project)""" if not project_id: - db.user_add_role({}, uid, role) + db.user_add_role(context.get_admin_context(), uid, role) return - db.user_add_project_role({}, uid, project_id, role) + db.user_add_project_role(context.get_admin_context(), + uid, project_id, role) def remove_role(self, uid, role, project_id=None): """Remove role for user (or user and project)""" if not project_id: - db.user_remove_role({}, uid, role) + db.user_remove_role(context.get_admin_context(), uid, role) return - db.user_remove_project_role({}, uid, project_id, role) + db.user_remove_project_role(context.get_admin_context(), + uid, project_id, role) def get_user_roles(self, uid, project_id=None): """Retrieve list of roles for user (or user and project)""" if project_id is None: - roles = db.user_get_roles({}, uid) + 
roles = db.user_get_roles(context.get_admin_context(), uid) return roles else: - roles = db.user_get_roles_for_project({}, uid, project_id) + roles = db.user_get_roles_for_project(context.get_admin_context(), + uid, project_id) return roles def delete_user(self, id): """Delete a user""" - user = db.user_get({}, id) - db.user_delete({}, user['id']) + user = db.user_get(context.get_admin_context(), id) + db.user_delete(context.get_admin_context(), user['id']) def delete_project(self, project_id): """Delete a project""" - db.project_delete({}, project_id) + db.project_delete(context.get_admin_context(), project_id) def modify_user(self, uid, access_key=None, secret_key=None, admin=None): """Modify an existing user""" @@ -223,13 +234,13 @@ class DbDriver(object): values['secret_key'] = secret_key if admin is not None: values['is_admin'] = admin - db.user_update({}, uid, values) + db.user_update(context.get_admin_context(), uid, values) def _validate_user_and_project(self, user_id, project_id): - user = db.user_get({}, user_id) + user = db.user_get(context.get_admin_context(), user_id) if not user: raise exception.NotFound('User "%s" not found' % user_id) - project = db.project_get({}, project_id) + project = db.project_get(context.get_admin_context(), project_id) if not project: raise exception.NotFound('Project "%s" not found' % project_id) return user, project diff --git a/nova/test.py b/nova/test.py index f6485377d..b9ea36e1d 100644 --- a/nova/test.py +++ b/nova/test.py @@ -32,6 +32,7 @@ from tornado import ioloop from twisted.internet import defer from twisted.trial import unittest +from nova import context from nova import db from nova import fakerabbit from nova import flags @@ -64,8 +65,9 @@ class TrialTestCase(unittest.TestCase): # now that we have some required db setup for the system # to work properly. self.start = datetime.datetime.utcnow() - if db.network_count(None) != 5: - network_manager.VlanManager().create_networks(None, + ctxt = context.get_admin_context() + if db.network_count(ctxt) != 5: + network_manager.VlanManager().create_networks(ctxt, FLAGS.fixed_range, 5, 16, FLAGS.vlan_start, @@ -87,8 +89,9 @@ class TrialTestCase(unittest.TestCase): self.stubs.SmartUnsetAll() self.mox.VerifyAll() # NOTE(vish): Clean up any ips associated during the test. 
- db.fixed_ip_disassociate_all_by_timeout(None, FLAGS.host, self.start) - db.network_disassociate_all(None) + ctxt = context.get_admin_context() + db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, self.start) + db.network_disassociate_all(ctxt) rpc.Consumer.attach_to_twisted = self.originalAttach for x in self.injected: try: @@ -98,7 +101,7 @@ class TrialTestCase(unittest.TestCase): if FLAGS.fake_rabbit: fakerabbit.reset_all() - db.security_group_destroy_all(None) + db.security_group_destroy_all(ctxt) super(TrialTestCase, self).tearDown() diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index edcdba425..78adb2e07 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -17,11 +17,11 @@ from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom +from nova import context from nova import db from nova import flags from nova import test from nova import utils -from nova.api import context from nova.api.ec2 import cloud from nova.auth import manager from nova.virt import libvirt_conn -- cgit From 8b329b5d1d79676f9d2d0d91426c882c0cea784a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 13 Oct 2010 22:51:55 -0700 Subject: fix remaining tests --- nova/api/ec2/cloud.py | 44 +++++++++++++++++++------------------- nova/tests/compute_unittest.py | 2 +- nova/tests/network_unittest.py | 20 +++++++++++------ nova/tests/objectstore_unittest.py | 28 +++++++----------------- nova/tests/virt_unittest.py | 15 +++++++------ nova/virt/libvirt_conn.py | 9 +++++--- 6 files changed, 59 insertions(+), 59 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 13c038f17..e96838f99 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -131,14 +131,14 @@ class CloudController(object): result[key] = [line] return result - def _trigger_refresh_security_group(self, security_group): + def _trigger_refresh_security_group(self, context, security_group): nodes = set([instance['host'] for instance in security_group.instances if instance['host'] is not None]) for node in nodes: - rpc.call('%s.%s' % (FLAGS.compute_topic, node), + rpc.cast(context, + '%s.%s' % (FLAGS.compute_topic, node), { "method": "refresh_security_group", - "args": { "context": None, - "security_group_id": security_group.id}}) + "args": {"security_group_id": security_group.id}}) def get_metadata(self, address): ctxt = context.get_admin_context() @@ -380,7 +380,7 @@ class CloudController(object): match = False if match: db.security_group_rule_destroy(context, rule['id']) - self._trigger_refresh_security_group(security_group) + self._trigger_refresh_security_group(context, security_group) return True raise exception.ApiError("No rule for the specified parameters.") @@ -403,7 +403,7 @@ class CloudController(object): security_group_rule = db.security_group_rule_create(context, values) - self._trigger_refresh_security_group(security_group) + self._trigger_refresh_security_group(context, security_group) return True @@ -454,11 +454,11 @@ class CloudController(object): ec2_id = instance_id[0] internal_id = ec2_id_to_internal_id(ec2_id) instance_ref = db.instance_get_by_internal_id(context, internal_id) - output = rpc.call('%s.%s' % (FLAGS.compute_topic, - instance_ref['host']), - { "method" : "get_console_output", - "args" : { "context": None, - "instance_id": instance_ref['id']}}) + output = rpc.call(context, + '%s.%s' % (FLAGS.compute_topic, + instance_ref['host']), + {"method" : "get_console_output", + "args" : 
{"instance_id": instance_ref['id']}}) now = datetime.datetime.utcnow() return { "InstanceId" : ec2_id, @@ -543,10 +543,10 @@ class CloudController(object): host = instance_ref['host'] rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_ref['id'], - "instance_id": instance_ref['id'], - "mountpoint": device}}) + {"method": "attach_volume", + "args": {"volume_id": volume_ref['id'], + "instance_id": instance_ref['id'], + "mountpoint": device}}) return {'attachTime': volume_ref['attach_time'], 'device': volume_ref['mountpoint'], 'instanceId': instance_ref['id'], @@ -567,9 +567,9 @@ class CloudController(object): host = instance_ref['host'] rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance_ref['id'], - "volume_id": volume_ref['id']}}) + {"method": "detach_volume", + "args": {"instance_id": instance_ref['id'], + "volume_id": volume_ref['id']}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind @@ -703,8 +703,8 @@ class CloudController(object): network_topic = self._get_network_topic(context) public_ip = rpc.call(context, network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) + {"method": "allocate_floating_ip", + "args": {"project_id": context.project_id}}) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): @@ -747,8 +747,8 @@ class CloudController(object): if not host: host = rpc.call(context, FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) + {"method": "set_network_host", + "args": {"network_id": network_ref['id']}}) return db.queue_get_for(context, FLAGS.network_topic, host) def _ensure_default_security_group(self, context): diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py index 6dc976114..01e1bcd30 100644 --- a/nova/tests/compute_unittest.py +++ b/nova/tests/compute_unittest.py @@ -97,7 +97,7 @@ class ComputeTestCase(test.TrialTestCase): self.assertEqual(instance_ref['deleted_at'], None) terminate = datetime.datetime.utcnow() yield self.compute.terminate_instance(self.context, instance_id) - self.context = self.context.admin(True) + self.context = self.context.elevated(True) instance_ref = db.instance_get(self.context, instance_id) self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['deleted_at'] > terminate) diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py index d88926b29..e8dd2624f 100644 --- a/nova/tests/network_unittest.py +++ b/nova/tests/network_unittest.py @@ -55,7 +55,7 @@ class NetworkTestCase(test.TrialTestCase): project = self.manager.create_project(name, 'netuser', name) self.projects.append(project) # create the necessary network data for the project - user_context = context.APIRequestContext(project=self.projects[i], + user_context = context.RequestContext(project=self.projects[i], user=self.user) network_ref = self.network.get_network(user_context) self.network.set_network_host(context.get_admin_context(), @@ -80,6 +80,7 @@ class NetworkTestCase(test.TrialTestCase): mac = utils.generate_mac() project = self.projects[project_num] self.context._project = project + self.context.project_id = project.id return db.instance_create(self.context, {'project_id': project.id, 'mac_address': mac}) @@ -89,10 +90,12 @@ class 
NetworkTestCase(test.TrialTestCase): if instance_id is None: instance_id = self.instance_id self.context._project = self.projects[project_num] + self.context.project_id = self.projects[project_num].id return self.network.allocate_fixed_ip(self.context, instance_id) def _deallocate_address(self, project_num, address): self.context._project = self.projects[project_num] + self.context.project_id = self.projects[project_num].id self.network.deallocate_fixed_ip(self.context, address) @@ -100,13 +103,15 @@ class NetworkTestCase(test.TrialTestCase): """Makes sure that we can allocaate a public ip""" # TODO(vish): better way of adding floating ips self.context._project = self.projects[0] + self.context.project_id = self.projects[0].id pubnet = IPy.IP(flags.FLAGS.floating_range) address = str(pubnet[0]) try: db.floating_ip_get_by_address(context.get_admin_context(), address) except exception.NotFound: - db.floating_ip_create(context.get_admin_context(), {'address': address, - 'host': FLAGS.host}) + db.floating_ip_create(context.get_admin_context(), + {'address': address, + 'host': FLAGS.host}) float_addr = self.network.allocate_floating_ip(self.context, self.projects[0].id) fix_addr = self._create_address(0) @@ -179,6 +184,7 @@ class NetworkTestCase(test.TrialTestCase): lease_ip(address2) lease_ip(address3) self.context._project = self.projects[i] + self.context.project_id = self.projects[i].id self.assertFalse(is_allocated_in_project(address, self.projects[0].id)) self.assertFalse(is_allocated_in_project(address2, @@ -194,6 +200,7 @@ class NetworkTestCase(test.TrialTestCase): for instance_id in instance_ids: db.instance_destroy(context.get_admin_context(), instance_id) self.context._project = self.projects[0] + self.context.project_id = self.projects[0].id self.network.deallocate_fixed_ip(self.context, first) self._deallocate_address(0, first) release_ip(first) @@ -208,16 +215,17 @@ class NetworkTestCase(test.TrialTestCase): def test_too_many_networks(self): """Ensure error is raised if we run out of networks""" projects = [] - networks_left = FLAGS.num_networks - db.network_count(context.get_admin_context()) + networks_left = (FLAGS.num_networks - + db.network_count(context.get_admin_context())) for i in range(networks_left): project = self.manager.create_project('many%s' % i, self.user) projects.append(project) - db.project_get_network(None, project.id) + db.project_get_network(context.get_admin_context(), project.id) project = self.manager.create_project('last', self.user) projects.append(project) self.assertRaises(db.NoMoreNetworks, db.project_get_network, - None, + context.get_admin_context(), project.id) for project in projects: self.manager.delete_project(project) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index 872f1ab23..f096ac6fe 100644 --- a/nova/tests/objectstore_unittest.py +++ b/nova/tests/objectstore_unittest.py @@ -32,6 +32,7 @@ from boto.s3.connection import S3Connection, OrdinaryCallingFormat from twisted.internet import reactor, threads, defer from twisted.web import http, server +from nova import context from nova import flags from nova import objectstore from nova import test @@ -70,13 +71,7 @@ class ObjectStoreTestCase(test.TrialTestCase): self.auth_manager.create_user('admin_user', admin=True) self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1']) self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2']) - - class Context(object): - """Dummy context for running tests.""" - user = None - project = None - 
- self.context = Context() + self.context = context.RequestContext('user1', 'proj1') def tearDown(self): # pylint: disable-msg=C0103 """Tear down users and projects.""" @@ -89,8 +84,6 @@ class ObjectStoreTestCase(test.TrialTestCase): def test_buckets(self): """Test the bucket API.""" - self.context.user = self.auth_manager.get_user('user1') - self.context.project = self.auth_manager.get_project('proj1') objectstore.bucket.Bucket.create('new_bucket', self.context) bucket = objectstore.bucket.Bucket('new_bucket') @@ -98,14 +91,12 @@ class ObjectStoreTestCase(test.TrialTestCase): self.assert_(bucket.is_authorized(self.context)) # another user is not authorized - self.context.user = self.auth_manager.get_user('user2') - self.context.project = self.auth_manager.get_project('proj2') - self.assertFalse(bucket.is_authorized(self.context)) + context2 = context.RequestContext('user2', 'proj2') + self.assertFalse(bucket.is_authorized(context2)) # admin is authorized to use bucket - self.context.user = self.auth_manager.get_user('admin_user') - self.context.project = None - self.assertTrue(bucket.is_authorized(self.context)) + admin_context = context.RequestContext('admin_user', None) + self.assertTrue(bucket.is_authorized(admin_context)) # new buckets are empty self.assertTrue(bucket.list_keys()['Contents'] == []) @@ -143,8 +134,6 @@ class ObjectStoreTestCase(test.TrialTestCase): def do_test_images(self, manifest_file, expect_kernel_and_ramdisk, image_bucket, image_name): "Test the image API." - self.context.user = self.auth_manager.get_user('user1') - self.context.project = self.auth_manager.get_project('proj1') # create a bucket for our bundle objectstore.bucket.Bucket.create(image_bucket, self.context) @@ -179,9 +168,8 @@ class ObjectStoreTestCase(test.TrialTestCase): self.assertFalse('ramdiskId' in my_img.metadata) # verify image permissions - self.context.user = self.auth_manager.get_user('user2') - self.context.project = self.auth_manager.get_project('proj2') - self.assertFalse(my_img.is_authorized(self.context)) + context2 = context.RequestContext('user2', 'proj2') + self.assertFalse(my_img.is_authorized(context2)) # change user-editable fields my_img.update_user_editable_fields({'display_name': 'my cool image'}) diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py index 78adb2e07..76af5cabd 100644 --- a/nova/tests/virt_unittest.py +++ b/nova/tests/virt_unittest.py @@ -51,9 +51,9 @@ class LibvirtConnTestCase(test.TrialTestCase): 'bridge' : 'br101', 'instance_type' : 'm1.small'} - instance_ref = db.instance_create(None, instance) - user_context = context.APIRequestContext(project=self.project, - user=self.user) + user_context = context.RequestContext(project=self.project, + user=self.user) + instance_ref = db.instance_create(user_context, instance) network_ref = self.network.get_network(user_context) self.network.set_network_host(context.get_admin_context(), network_ref['id']) @@ -61,9 +61,10 @@ class LibvirtConnTestCase(test.TrialTestCase): fixed_ip = { 'address' : ip, 'network_id' : network_ref['id'] } - fixed_ip_ref = db.fixed_ip_create(None, fixed_ip) - db.fixed_ip_update(None, ip, { 'allocated' : True, - 'instance_id' : instance_ref['id'] }) + ctxt = context.get_admin_context() + fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) + db.fixed_ip_update(ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id'] }) type_uri_map = { 'qemu' : ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), @@ -132,7 +133,7 @@ class NWFilterTestCase(test.TrialTestCase): 
self.manager = manager.AuthManager() self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True) self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.APIRequestContext(self.user, self.project) + self.context = context.RequestContext(self.user, self.project) self.fake_libvirt_connection = Mock() diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ebe37c5e7..d8d36ff65 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -376,7 +376,8 @@ class LibvirtConnection(object): instance['project_id']) # FIXME(vish): stick this in db instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']] - ip_address = db.instance_get_fixed_address({}, instance['id']) + ip_address = db.instance_get_fixed_address(context.get_admin_context(), + instance['id']) # Assume that the gateway also acts as the dhcp server. dhcp_server = network['gateway'] xml_info = {'type': FLAGS.libvirt_type, @@ -648,7 +649,8 @@ class NWFilterFirewall(object): ) % instance['name'] if FLAGS.allow_project_net_traffic: - network_ref = db.project_get_network({}, instance['project_id']) + network_ref = db.project_get_network(context.get_admin_context(), + instance['project_id']) net, mask = self._get_net_and_mask(network_ref['cidr']) project_filter = self.nova_project_filter(instance['project_id'], net, mask) @@ -673,7 +675,8 @@ class NWFilterFirewall(object): def security_group_to_nwfilter_xml(self, security_group_id): - security_group = db.security_group_get({}, security_group_id) + security_group = db.security_group_get(context.get_admin_context(), + security_group_id) rule_xml = "" for rule in security_group.rules: rule_xml += "" -- cgit From af9910d935b7b3839774e3485b87b4a0dcdb78ff Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 13 Oct 2010 23:44:04 -0700 Subject: fix context in bin files --- bin/nova-dhcpbridge | 6 ++---- bin/nova-manage | 26 +++++++++++++++----------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 4574f0e20..2b7a083d2 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -60,8 +60,7 @@ def add_lease(mac, ip_address, _hostname, _interface): rpc.cast(context.get_admin_context(), "%s.%s" % (FLAGS.network_topic, FLAGS.host), {"method": "lease_fixed_ip", - "args": {"context": None, - "mac": mac, + "args": {"mac": mac, "address": ip_address}}) @@ -82,8 +81,7 @@ def del_lease(mac, ip_address, _hostname, _interface): rpc.cast(context.get_admin_context(), "%s.%s" % (FLAGS.network_topic, FLAGS.host), {"method": "release_fixed_ip", - "args": {"context": None, - "mac": mac, + "args": {"mac": mac, "address": ip_address}}) diff --git a/bin/nova-manage b/bin/nova-manage index d36b0f53a..cf574c6b3 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -67,13 +67,13 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) +from nova import context from nova import db from nova import exception from nova import flags from nova import quota from nova import utils from nova.auth import manager -from nova.network import manager as network_manager from nova.cloudpipe import pipelib @@ -121,7 +121,7 @@ class VpnCommands(object): def _vpn_for(self, project_id): """Get the VPN instance for a project ID.""" - for instance in db.instance_get_all(None): + for instance in db.instance_get_all(context.get_admin_context()): if 
(instance['image_id'] == FLAGS.vpn_image_id and not instance['state_description'] in ['shutting_down', 'shutdown'] @@ -323,13 +323,14 @@ class ProjectCommands(object): def quota(self, project_id, key=None, value=None): """Set or display quotas for project arguments: project_id [key] [value]""" + ctxt = context.get_admin_context() if key: quo = {'project_id': project_id, key: value} try: - db.quota_update(None, project_id, quo) + db.quota_update(ctxt, project_id, quo) except exception.NotFound: - db.quota_create(None, quo) - project_quota = quota.get_quota(None, project_id) + db.quota_create(ctxt, quo) + project_quota = quota.get_quota(ctxt, project_id) for key, value in project_quota.iteritems(): print '%s: %s' % (key, value) @@ -353,23 +354,26 @@ class FloatingIpCommands(object): """Creates floating ips for host by range arguments: host ip_range""" for address in IPy.IP(range): - db.floating_ip_create(None, {'address': str(address), - 'host': host}) + db.floating_ip_create(context.get_admin_context(), + {'address': str(address), + 'host': host}) def delete(self, ip_range): """Deletes floating ips by range arguments: range""" for address in IPy.IP(ip_range): - db.floating_ip_destroy(None, str(address)) + db.floating_ip_destroy(context.get_admin_context(), + str(address)) def list(self, host=None): """Lists all floating ips (optionally by host) arguments: [host]""" + ctxt = context.get_admin_context() if host == None: - floating_ips = db.floating_ip_get_all(None) + floating_ips = db.floating_ip_get_all(ctxt) else: - floating_ips = db.floating_ip_get_all_by_host(None, host) + floating_ips = db.floating_ip_get_all_by_host(ctxt, host) for floating_ip in floating_ips: instance = None if floating_ip['fixed_ip']: @@ -451,7 +455,7 @@ def main(): if FLAGS.verbose: logging.getLogger().setLevel(logging.DEBUG) - + script_name = argv.pop(0) if len(argv) < 1: print script_name + " category action []" -- cgit From 914786b8f9d30e2762e290ef911710efcbe6d310 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 14 Oct 2010 00:30:42 -0700 Subject: review fixes --- nova/api/cloud.py | 8 ++------ nova/context.py | 6 +++--- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/nova/api/cloud.py b/nova/api/cloud.py index e16229e7d..aa84075dc 100644 --- a/nova/api/cloud.py +++ b/nova/api/cloud.py @@ -29,12 +29,8 @@ FLAGS = flags.FLAGS def reboot(instance_id, context=None): - """Reboot the given instance. - - #TODO(gundlach) not actually sure what context is used for by ec2 here - -- I think we can just remove it and use None all the time. - """ - instance_ref = db.instance_get_by_ec2_id(context, instance_id) + """Reboot the given instance.""" + instance_ref = db.instance_get_by_internal_id(context, instance_id) host = instance_ref['host'] rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, host), diff --git a/nova/context.py b/nova/context.py index 977866d59..2a94d643d 100644 --- a/nova/context.py +++ b/nova/context.py @@ -63,7 +63,7 @@ class RequestContext(object): @property def user(self): - # NOTE(visn): Delay import of manager, so that we can import this + # NOTE(vish): Delay import of manager, so that we can import this # file from manager. from nova.auth import manager if not self._user: @@ -72,7 +72,7 @@ class RequestContext(object): @property def project(self): - # NOTE(visn): Delay import of manager, so that we can import this + # NOTE(vish): Delay import of manager, so that we can import this # file from manager. 
from nova.auth import manager if not self._project: @@ -85,7 +85,7 @@ class RequestContext(object): 'is_admin': self.is_admin, 'read_deleted': self.read_deleted, 'remote_address': self.remote_address, - 'timestamp': utils.isotime(), + 'timestamp': utils.isotime(self.timestamp), 'request_id': self.request_id} @classmethod -- cgit From f8e41d8a1e53b7fc7f4bd91815ed5e2a17dcd7da Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 14 Oct 2010 01:46:06 -0700 Subject: fix nosetests --- nova/api/openstack/servers.py | 30 ++++++++++++++++-------------- nova/context.py | 11 +++++++++-- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 8c41944d2..869ce73ca 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -93,6 +93,7 @@ class Controller(wsgi.Controller): if not db_driver: db_driver = FLAGS.db_driver self.db_driver = utils.import_object(db_driver) + self.network_manager = utils.import_object(FLAGS.network_manager) super(Controller, self).__init__() def index(self, req): @@ -109,7 +110,8 @@ class Controller(wsgi.Controller): entity_maker - either _entity_detail or _entity_inst """ user_id = req.environ['nova.context']['user']['id'] - instance_list = self.db_driver.instance_get_all_by_user(None, user_id) + ctxt = context.RequestContext(user_id, user_id) + instance_list = self.db_driver.instance_get_all_by_user(ctxt, user_id) limited_list = nova.api.openstack.limited(instance_list, req) res = [entity_maker(inst)['server'] for inst in limited_list] return _entity_list(res) @@ -117,7 +119,8 @@ class Controller(wsgi.Controller): def show(self, req, id): """ Returns server details by server id """ user_id = req.environ['nova.context']['user']['id'] - inst = self.db_driver.instance_get_by_internal_id(None, int(id)) + ctxt = context.RequestContext(user_id, user_id) + inst = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) if inst: if inst.user_id == user_id: return _entity_detail(inst) @@ -126,9 +129,10 @@ class Controller(wsgi.Controller): def delete(self, req, id): """ Destroys a server """ user_id = req.environ['nova.context']['user']['id'] - instance = self.db_driver.instance_get_by_internal_id(None, int(id)) + ctxt = context.RequestContext(user_id, user_id) + instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) if instance and instance['user_id'] == user_id: - self.db_driver.instance_destroy(None, id) + self.db_driver.instance_destroy(ctxt, id) return faults.Fault(exc.HTTPAccepted()) return faults.Fault(exc.HTTPNotFound()) @@ -154,13 +158,13 @@ class Controller(wsgi.Controller): def update(self, req, id): """ Updates the server name or password """ user_id = req.environ['nova.context']['user']['id'] + ctxt = context.RequestContext(user_id, user_id) inst_dict = self._deserialize(req.body, req) if not inst_dict: return faults.Fault(exc.HTTPUnprocessableEntity()) - ctxt = context.get_admin_context() instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id)) if not instance or instance.user_id != user_id: return faults.Fault(exc.HTTPNotFound()) @@ -174,12 +178,13 @@ class Controller(wsgi.Controller): """ multi-purpose method used to reboot, rebuild, and resize a server """ user_id = req.environ['nova.context']['user']['id'] + ctxt = context.RequestContext(user_id, user_id) input_dict = self._deserialize(req.body, req) try: reboot_type = input_dict['reboot']['type'] except Exception: raise faults.Fault(webob.exc.HTTPNotImplemented()) - inst_ref = 
self.db.instance_get_by_internal_id(None, int(id)) + inst_ref = self.db.instance_get_by_internal_id(ctxt, int(id)) if not inst_ref or (inst_ref and not inst_ref.user_id == user_id): return faults.Fault(exc.HTTPUnprocessableEntity()) cloud.reboot(id) @@ -190,6 +195,7 @@ class Controller(wsgi.Controller): inst = {} user_id = req.environ['nova.context']['user']['id'] + ctxt = context.RequestContext(user_id, user_id) flavor_id = env['server']['flavorId'] @@ -236,12 +242,8 @@ class Controller(wsgi.Controller): inst['vcpus'] = flavor['vcpus'] inst['local_gb'] = flavor['local_gb'] - ref = self.db_driver.instance_create(None, inst) + ref = self.db_driver.instance_create(ctxt, inst) inst['id'] = ref.internal_id - # TODO(dietz): this isn't explicitly necessary, but the networking - # calls depend on an object with a project_id property, and therefore - # should be cleaned up later - api_context = context.RequestContext(user_id) inst['mac_address'] = utils.generate_mac() @@ -249,10 +251,10 @@ class Controller(wsgi.Controller): inst['launch_index'] = 0 inst['hostname'] = str(ref.internal_id) - self.db_driver.instance_update(None, inst['id'], inst) + self.db_driver.instance_update(ctxt, inst['id'], inst) network_manager = utils.import_object(FLAGS.network_manager) - address = network_manager.allocate_fixed_ip(api_context, + address = network_manager.allocate_fixed_ip(ctxt, inst['id']) # TODO(vish): This probably should be done in the scheduler @@ -274,4 +276,4 @@ class Controller(wsgi.Controller): FLAGS.network_topic, {"method": "set_network_host", "args": {"network_id": network_ref['id']}}) - return self.db_driver.queue_get_for(None, FLAGS.network_topic, host) + return self.db_driver.queue_get_for(context, FLAGS.network_topic, host) diff --git a/nova/context.py b/nova/context.py index 2a94d643d..7e2a54c04 100644 --- a/nova/context.py +++ b/nova/context.py @@ -23,6 +23,7 @@ RequestContext: context for requests that persist through all of nova. import datetime import random +from nova import exception from nova import utils class RequestContext(object): @@ -67,7 +68,10 @@ class RequestContext(object): # file from manager. from nova.auth import manager if not self._user: - self._user = manager.AuthManager().get_user(self.user_id) + try: + self._user = manager.AuthManager().get_user(self.user_id) + except exception.NotFound: + pass return self._user @property @@ -76,7 +80,10 @@ class RequestContext(object): # file from manager. from nova.auth import manager if not self._project: - self._project = manager.AuthManager().get_project(self.project_id) + try: + self._project = manager.AuthManager().get_project(self.project_id) + except exception.NotFound: + pass return self._project def to_dict(self): -- cgit From 3f737b0a84e0f7f14984b074b97b617c198c64bf Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 14 Oct 2010 15:07:37 +0200 Subject: Move Redis code into fakeldap, since it's the only thing that still uses it. Adjust auth unittests to skip fakeldap tests if Redis isn't around. Adjust auth unittests to actually run the fakeldap tests if Redis /is/ around. 
--- nova/auth/fakeldap.py | 34 ++++++++++++++++++++++++----- nova/auth/manager.py | 2 +- nova/datastore.py | 53 --------------------------------------------- nova/tests/auth_unittest.py | 17 +++++++++++++-- run_tests.py | 7 ------ 5 files changed, 45 insertions(+), 68 deletions(-) delete mode 100644 nova/datastore.py diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 2791dfde6..3e92c38f6 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -24,8 +24,30 @@ library to work with nova. """ import json +import redis -from nova import datastore +from nova import flags + +FLAGS = flags.FLAGS +flags.DEFINE_string('redis_host', '127.0.0.1', + 'Host that redis is running on.') +flags.DEFINE_integer('redis_port', 6379, + 'Port that redis is running on.') +flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') + +class Redis(object): + def __init__(self): + if hasattr(self.__class__, '_instance'): + raise Exception('Attempted to instantiate singleton') + + @classmethod + def instance(cls): + if not hasattr(cls, '_instance'): + inst = redis.Redis(host=FLAGS.redis_host, + port=FLAGS.redis_port, + db=FLAGS.redis_db) + cls._instance = inst + return cls._instance SCOPE_BASE = 0 @@ -164,11 +186,11 @@ class FakeLDAP(object): key = "%s%s" % (self.__redis_prefix, dn) value_dict = dict([(k, _to_json(v)) for k, v in attr]) - datastore.Redis.instance().hmset(key, value_dict) + Redis.instance().hmset(key, value_dict) def delete_s(self, dn): """Remove the ldap object at specified dn.""" - datastore.Redis.instance().delete("%s%s" % (self.__redis_prefix, dn)) + Redis.instance().delete("%s%s" % (self.__redis_prefix, dn)) def modify_s(self, dn, attrs): """Modify the object at dn using the attribute list. @@ -179,7 +201,7 @@ class FakeLDAP(object): ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ - redis = datastore.Redis.instance() + redis = Redis.instance() key = "%s%s" % (self.__redis_prefix, dn) for cmd, k, v in attrs: @@ -204,7 +226,7 @@ class FakeLDAP(object): """ if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) - redis = datastore.Redis.instance() + redis = Redis.instance() if scope == SCOPE_BASE: keys = ["%s%s" % (self.__redis_prefix, dn)] else: @@ -232,3 +254,5 @@ class FakeLDAP(object): def __redis_prefix(self): # pylint: disable-msg=R0201 """Get the prefix to use for all redis keys.""" return 'ldap:' + + diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 9c499c98d..90aa2ae3c 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -201,7 +201,7 @@ class AuthManager(object): def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" - if not cls._instance: + if not cls._instance or ('new' in kwargs and kwargs['new']): cls._instance = super(AuthManager, cls).__new__(cls) return cls._instance diff --git a/nova/datastore.py b/nova/datastore.py deleted file mode 100644 index 8e2519429..000000000 --- a/nova/datastore.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Datastore: - -MAKE Sure that ReDIS is running, and your flags are set properly, -before trying to run this. -""" - -import logging -import redis - -from nova import flags - -FLAGS = flags.FLAGS -flags.DEFINE_string('redis_host', '127.0.0.1', - 'Host that redis is running on.') -flags.DEFINE_integer('redis_port', 6379, - 'Port that redis is running on.') -flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') - - -class Redis(object): - def __init__(self): - if hasattr(self.__class__, '_instance'): - raise Exception('Attempted to instantiate singleton') - - @classmethod - def instance(cls): - if not hasattr(cls, '_instance'): - inst = redis.Redis(host=FLAGS.redis_host, - port=FLAGS.redis_port, - db=FLAGS.redis_db) - cls._instance = inst - return cls._instance - - diff --git a/nova/tests/auth_unittest.py b/nova/tests/auth_unittest.py index 99f7ab599..97d22d702 100644 --- a/nova/tests/auth_unittest.py +++ b/nova/tests/auth_unittest.py @@ -80,7 +80,7 @@ class AuthManagerTestCase(object): FLAGS.auth_driver = self.auth_driver super(AuthManagerTestCase, self).setUp() self.flags(connection_type='fake') - self.manager = manager.AuthManager() + self.manager = manager.AuthManager(new=True) def test_create_and_find_user(self): with user_generator(self.manager): @@ -117,7 +117,7 @@ class AuthManagerTestCase(object): self.assert_(filter(lambda u: u.id == 'test1', users)) self.assert_(filter(lambda u: u.id == 'test2', users)) self.assert_(not filter(lambda u: u.id == 'test3', users)) - + def test_can_add_and_remove_user_role(self): with user_generator(self.manager): self.assertFalse(self.manager.has_role('test1', 'itsec')) @@ -324,6 +324,19 @@ class AuthManagerTestCase(object): class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase): auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' + def __init__(self, *args, **kwargs): + AuthManagerTestCase.__init__(self) + test.TrialTestCase.__init__(self, *args, **kwargs) + import nova.auth.fakeldap as fakeldap + FLAGS.redis_db = 8 + if FLAGS.flush_db: + logging.info("Flushing redis datastore") + try: + r = fakeldap.Redis.instance() + r.flushdb() + except: + self.skip = True + class AuthManagerDbTestCase(AuthManagerTestCase, test.TrialTestCase): auth_driver = 'nova.auth.dbdriver.DbDriver' diff --git a/run_tests.py b/run_tests.py index 0b27ec6cf..b1a3f1d66 100644 --- a/run_tests.py +++ b/run_tests.py @@ -45,7 +45,6 @@ import sys from twisted.scripts import trial as trial_script -from nova import datastore from nova import flags from nova import twistd @@ -86,12 +85,6 @@ if __name__ == '__main__': # TODO(termie): these should make a call instead of doing work on import if FLAGS.fake_tests: from nova.tests.fake_flags import * - # use db 8 for fake tests - FLAGS.redis_db = 8 - if FLAGS.flush_db: - logging.info("Flushing redis datastore") - r = datastore.Redis.instance() - r.flushdb() else: from nova.tests.real_flags import * -- cgit From 81cdbc42d41509f629fe8ec0c7605958134e9ed0 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 14 Oct 2010 14:00:01 -0700 Subject: timestamps are passed as unicode --- nova/context.py 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/context.py b/nova/context.py index 7e2a54c04..f5d3fed08 100644 --- a/nova/context.py +++ b/nova/context.py @@ -52,7 +52,7 @@ class RequestContext(object): self.remote_address = remote_address if not timestamp: timestamp = datetime.datetime.utcnow() - if isinstance(timestamp, str): + if isinstance(timestamp, str) or isinstance(timestamp, unicode): timestamp = utils.parse_isotime(timestamp) self.timestamp = timestamp if not request_id: -- cgit From 951f4f2f8bfcdc2fc1f638a1c568979eb01dcb1b Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 14 Oct 2010 16:44:58 -0700 Subject: get flags for nova-manage and fix a couple more deprecations --- bin/nova-manage | 5 +++++ nova/network/manager.py | 17 +++++++++++------ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index cf574c6b3..1c5700190 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -78,6 +78,11 @@ from nova.cloudpipe import pipelib FLAGS = flags.FLAGS +flags.DECLARE('fixed_range', 'nova.network.manager') +flags.DECLARE('num_networks', 'nova.network.manager') +flags.DECLARE('network_size', 'nova.network.manager') +flags.DECLARE('vlan_start', 'nova.network.manager') +flags.DECLARE('vpn_start', 'nova.network.manager') class VpnCommands(object): diff --git a/nova/network/manager.py b/nova/network/manager.py index cb5759a2b..c7080ccd8 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -27,6 +27,7 @@ import math import IPy from twisted.internet import defer +from nova import context from nova import db from nova import exception from nova import flags @@ -79,8 +80,9 @@ class NetworkManager(manager.Manager): def init_host(self): # Set up networking for the projects for which we're already # the designated network host. - for network in self.db.host_get_networks(None, self.host): - self._on_set_network_host(None, network['id']) + ctxt = context.get_admin_context() + for network in self.db.host_get_networks(ctxt, self.host): + self._on_set_network_host(ctxt, network['id']) def set_network_host(self, context, network_id): """Safely sets the host of the network""" @@ -238,7 +240,7 @@ class FlatManager(NetworkManager): def deallocate_fixed_ip(self, context, address, *args, **kwargs): """Returns a fixed ip to the pool""" self.db.fixed_ip_update(context, address, {'allocated': False}) - self.db.fixed_ip_disassociate(None, address) + self.db.fixed_ip_disassociate(context.elevated(), address) def setup_compute_network(self, context, instance_id): """Network is created manually""" @@ -338,13 +340,16 @@ class VlanManager(NetworkManager): # TODO(vish): This should probably be getting project_id from # the instance, but it is another trip to the db. # Perhaps this method should take an instance_ref. 
- network_ref = self.db.project_get_network(context.elevated(), + ctxt = context.elevated() + network_ref = self.db.project_get_network(ctxt, context.project_id) if kwargs.get('vpn', None): address = network_ref['vpn_private_address'] - self.db.fixed_ip_associate(None, address, instance_id) + self.db.fixed_ip_associate(ctxt, + address, + instance_id) else: - address = self.db.fixed_ip_associate_pool(context.elevated(), + address = self.db.fixed_ip_associate_pool(ctxt, network_ref['id'], instance_id) self.db.fixed_ip_update(context, address, {'allocated': True}) -- cgit From 68c8f2ba174a5052a5681bb55f4e21cf5f0febbf Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Fri, 15 Oct 2010 15:25:29 +0900 Subject: make run_tests.sh's default perform as expected --- run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index ec727d094..6a2b15b8f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -55,7 +55,7 @@ else else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve - if [ "x$use_ve" = "xY" ]; then + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py else -- cgit From e80e91bd653d16b069d82cb26990718daf38d0c2 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Fri, 15 Oct 2010 16:06:12 +0900 Subject: Also accept 'y' --- run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_tests.sh b/run_tests.sh index 6a2b15b8f..a11dcd7cc 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -55,7 +55,7 @@ else else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve - if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" ]; then + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py else -- cgit
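The recurring pattern in the patches above is the move away from ad-hoc admin access (passing None or {} as a context, or calling context.admin()) toward an explicit RequestContext with elevated() and get_admin_context(), as seen in the nova/quota.py, nova/network/manager.py and nova/auth/dbdriver.py hunks. The sketch below condenses that pattern into one self-contained place. It is a simplified, hypothetical illustration, not the real nova/context.py: the names RequestContext, elevated, get_admin_context, project_id and read_deleted come from the patches, but the fields and method bodies here are a minimal reconstruction under those assumptions.

    import copy


    class RequestContext(object):
        """Per-request security context (simplified sketch)."""

        def __init__(self, user_id, project_id, is_admin=False,
                     read_deleted=False):
            self.user_id = user_id
            self.project_id = project_id
            self.is_admin = is_admin
            self.read_deleted = read_deleted

        def elevated(self, read_deleted=False):
            """Return an admin copy of this context; self stays unprivileged."""
            ctxt = copy.copy(self)
            ctxt.is_admin = True
            ctxt.read_deleted = read_deleted
            return ctxt


    def get_admin_context():
        """Admin context for daemons and tests that have no real caller."""
        return RequestContext(user_id=None, project_id=None, is_admin=True)


    # Usage in the style of the nova/quota.py hunks above: read project_id
    # from the caller's context first, then elevate only for the privileged
    # database lookups.
    ctxt = RequestContext('user1', 'proj1')
    project_id = ctxt.project_id
    admin_ctxt = ctxt.elevated()
    assert not ctxt.is_admin                      # caller's context untouched
    assert admin_ctxt.is_admin
    assert admin_ctxt.project_id == project_id    # identity is preserved

The design point that makes elevated() safer than a long-lived shared admin context is that it returns a copy: the extra privilege is scoped to the specific call that needed it and cannot leak back into the request that produced it.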