-rwxr-xr-x  bin/nova-dhcpbridge                |  20
-rw-r--r--  nova/api/cloud.py                  |  10
-rw-r--r--  nova/api/context.py                |  46
-rw-r--r--  nova/api/ec2/__init__.py           |   7
-rw-r--r--  nova/api/ec2/cloud.py              | 107
-rw-r--r--  nova/api/rackspace/context.py      |  33
-rw-r--r--  nova/api/rackspace/servers.py      |  58
-rw-r--r--  nova/auth/manager.py               |  20
-rw-r--r--  nova/cloudpipe/pipelib.py          |   4
-rw-r--r--  nova/context.py                    | 106
-rw-r--r--  nova/db/sqlalchemy/api.py          |   3
-rw-r--r--  nova/network/manager.py            |   6
-rw-r--r--  nova/objectstore/handler.py        |  12
-rw-r--r--  nova/rpc.py                        |  15
-rw-r--r--  nova/scheduler/manager.py          |   4
-rw-r--r--  nova/service.py                    |  40
-rw-r--r--  nova/tests/access_unittest.py      |  56
-rw-r--r--  nova/tests/api_unittest.py         |   3
-rw-r--r--  nova/tests/cloud_unittest.py       |  28
-rw-r--r--  nova/tests/compute_unittest.py     |  11
-rw-r--r--  nova/tests/network_unittest.py     |  66
-rw-r--r--  nova/tests/quota_unittest.py       |  14
-rw-r--r--  nova/tests/rpc_unittest.py         |  30
-rw-r--r--  nova/tests/scheduler_unittest.py   |  22
-rw-r--r--  nova/tests/service_unittest.py     |  24
-rw-r--r--  nova/tests/volume_unittest.py      |  29
-rw-r--r--  nova/virt/libvirt_conn.py          |  27
27 files changed, 433 insertions(+), 368 deletions(-)
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index a127ed03c..4574f0e20 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -33,6 +33,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
+from nova import context
from nova import db
from nova import flags
from nova import rpc
@@ -52,9 +53,12 @@ def add_lease(mac, ip_address, _hostname, _interface):
if FLAGS.fake_rabbit:
logging.debug("leasing ip")
network_manager = utils.import_object(FLAGS.network_manager)
- network_manager.lease_fixed_ip(None, mac, ip_address)
+ network_manager.lease_fixed_ip(context.get_admin_context(),
+ mac,
+ ip_address)
else:
- rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
+ rpc.cast(context.get_admin_context(),
+ "%s.%s" % (FLAGS.network_topic, FLAGS.host),
{"method": "lease_fixed_ip",
"args": {"context": None,
"mac": mac,
@@ -71,9 +75,12 @@ def del_lease(mac, ip_address, _hostname, _interface):
if FLAGS.fake_rabbit:
logging.debug("releasing ip")
network_manager = utils.import_object(FLAGS.network_manager)
- network_manager.release_fixed_ip(None, mac, ip_address)
+ network_manager.release_fixed_ip(context.get_admin_context(),
+ mac,
+ ip_address)
else:
- rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.host),
+ rpc.cast(context.get_admin_context(),
+ "%s.%s" % (FLAGS.network_topic, FLAGS.host),
{"method": "release_fixed_ip",
"args": {"context": None,
"mac": mac,
@@ -82,8 +89,9 @@ def del_lease(mac, ip_address, _hostname, _interface):
def init_leases(interface):
"""Get the list of hosts for an interface."""
- network_ref = db.network_get_by_bridge(None, interface)
- return linux_net.get_dhcp_hosts(None, network_ref['id'])
+ ctxt = context.get_admin_context()
+ network_ref = db.network_get_by_bridge(ctxt, interface)
+ return linux_net.get_dhcp_hosts(ctxt, network_ref['id'])
def main():
diff --git a/nova/api/cloud.py b/nova/api/cloud.py
index 345677d4f..e16229e7d 100644
--- a/nova/api/cloud.py
+++ b/nova/api/cloud.py
@@ -30,13 +30,13 @@ FLAGS = flags.FLAGS
def reboot(instance_id, context=None):
"""Reboot the given instance.
-
+
#TODO(gundlach) not actually sure what context is used for by ec2 here
-- I think we can just remove it and use None all the time.
"""
- instance_ref = db.instance_get_by_ec2_id(None, instance_id)
+ instance_ref = db.instance_get_by_ec2_id(context, instance_id)
host = instance_ref['host']
- rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "reboot_instance",
- "args": {"context": None,
- "instance_id": instance_ref['id']}})
+ "args": {"instance_id": instance_ref['id']}})
diff --git a/nova/api/context.py b/nova/api/context.py
deleted file mode 100644
index b66cfe468..000000000
--- a/nova/api/context.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-APIRequestContext
-"""
-
-import random
-
-
-class APIRequestContext(object):
- def __init__(self, user, project):
- self.user = user
- self.project = project
- self.request_id = ''.join(
- [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
- for x in xrange(20)]
- )
- if user:
- self.is_admin = user.is_admin()
- else:
- self.is_admin = False
- self.read_deleted = False
-
-
-def get_admin_context(user=None, read_deleted=False):
- context_ref = APIRequestContext(user=user, project=None)
- context_ref.is_admin = True
- context_ref.read_deleted = read_deleted
- return context_ref
-
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 6b538a7f1..edc818c7d 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -25,9 +25,9 @@ import webob.dec
import webob.exc
from nova import exception
+from nova import context
from nova import flags
from nova import wsgi
-from nova.api import context
from nova.api.ec2 import apirequest
from nova.api.ec2 import admin
from nova.api.ec2 import cloud
@@ -78,7 +78,10 @@ class Authenticate(wsgi.Middleware):
raise webob.exc.HTTPForbidden()
# Authenticated!
- req.environ['ec2.context'] = context.APIRequestContext(user, project)
+ ctxt = context.RequestContext(user=user,
+ project=project,
+ remote_address=req.remote_addr)
+ req.environ['ec2.context'] = ctxt
return self.application
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 79c95788b..5e1de9dc0 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -28,6 +28,7 @@ import logging
import os
import time
+from nova import context
from nova import crypto
from nova import db
from nova import exception
@@ -100,9 +101,9 @@ class CloudController(object):
utils.runthis("Generating root CA: %s", "sh genrootca.sh")
os.chdir(start)
- def _get_mpi_data(self, project_id):
+ def _get_mpi_data(self, context, project_id):
result = {}
- for instance in db.instance_get_all_by_project(None, project_id):
+ for instance in db.instance_get_all_by_project(context, project_id):
if instance['fixed_ip']:
line = '%s slots=%d' % (instance['fixed_ip']['address'],
INSTANCE_TYPES[instance['instance_type']]['vcpus'])
@@ -114,10 +115,11 @@ class CloudController(object):
return result
def get_metadata(self, address):
- instance_ref = db.fixed_ip_get_instance(None, address)
+ ctxt = context.get_admin_context()
+ instance_ref = db.fixed_ip_get_instance(ctxt, address)
if instance_ref is None:
return None
- mpi = self._get_mpi_data(instance_ref['project_id'])
+ mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
if instance_ref['key_name']:
keys = {
'0': {
@@ -128,7 +130,7 @@ class CloudController(object):
else:
keys = ''
hostname = instance_ref['hostname']
- floating_ip = db.instance_get_floating_address(None,
+ floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
@@ -136,7 +138,7 @@ class CloudController(object):
'ami-id': instance_ref['image_id'],
'ami-launch-index': instance_ref['launch_index'],
'ami-manifest-path': 'FIXME',
- 'block-device-mapping': { # TODO(vish): replace with real data
+ 'block-device-mapping': { # TODO(vish): replace with real data
'ami': 'sda1',
'ephemeral0': 'sda2',
'root': '/dev/sda1',
@@ -218,7 +220,7 @@ class CloudController(object):
return {'keypairsSet': result}
def create_key_pair(self, context, key_name, **kwargs):
- data = _gen_key(None, context.user.id, key_name)
+ data = _gen_key(context, context.user.id, key_name)
return {'keyName': key_name,
'keyFingerprint': data['fingerprint'],
'keyMaterial': data['private_key']}
@@ -247,11 +249,11 @@ class CloudController(object):
def get_console_output(self, context, instance_id, **kwargs):
# instance_id is passed in as a list of instances
instance_ref = db.instance_get_by_ec2_id(context, instance_id[0])
- return rpc.call('%s.%s' % (FLAGS.compute_topic,
+ return rpc.call(context,
+ '%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
{"method": "get_console_output",
- "args": {"context": None,
- "instance_id": instance_ref['id']}})
+ "args": {"instance_id": instance_ref['id']}})
def describe_volumes(self, context, **kwargs):
if context.user.is_admin():
@@ -310,10 +312,10 @@ class CloudController(object):
vol['display_description'] = kwargs.get('display_description')
volume_ref = db.volume_create(context, vol)
- rpc.cast(FLAGS.scheduler_topic,
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
{"method": "create_volume",
- "args": {"context": None,
- "topic": FLAGS.volume_topic,
+ "args": {"topic": FLAGS.volume_topic,
"volume_id": volume_ref['id']}})
return {'volumeSet': [self._format_volume(context, volume_ref)]}
@@ -328,10 +330,10 @@ class CloudController(object):
raise exception.ApiError("Volume is already attached")
instance_ref = db.instance_get_by_ec2_id(context, instance_id)
host = instance_ref['host']
- rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "attach_volume",
- "args": {"context": None,
- "volume_id": volume_ref['id'],
+ "args": {"volume_id": volume_ref['id'],
"instance_id": instance_ref['id'],
"mountpoint": device}})
return {'attachTime': volume_ref['attach_time'],
@@ -351,10 +353,10 @@ class CloudController(object):
raise exception.ApiError("Volume is already detached")
try:
host = instance_ref['host']
- rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "detach_volume",
- "args": {"context": None,
- "instance_id": instance_ref['id'],
+ "args": {"instance_id": instance_ref['id'],
"volume_id": volume_ref['id']}})
except exception.NotFound:
# If the instance doesn't exist anymore,
@@ -388,7 +390,7 @@ class CloudController(object):
return self._format_describe_instances(context)
def _format_describe_instances(self, context):
- return { 'reservationSet': self._format_instances(context) }
+ return {'reservationSet': self._format_instances(context)}
def _format_run_instances(self, context, reservation_id):
i = self._format_instances(context, reservation_id)
@@ -482,20 +484,20 @@ class CloudController(object):
raise QuotaError("Address quota exceeded. You cannot "
"allocate any more addresses")
network_topic = self._get_network_topic(context)
- public_ip = rpc.call(network_topic,
+ public_ip = rpc.call(context,
+ network_topic,
{"method": "allocate_floating_ip",
- "args": {"context": None,
- "project_id": context.project.id}})
+ "args": {"project_id": context.project.id}})
return {'addressSet': [{'publicIp': public_ip}]}
def release_address(self, context, public_ip, **kwargs):
# NOTE(vish): Should we make sure this works?
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = self._get_network_topic(context)
- rpc.cast(network_topic,
+ rpc.cast(context,
+ network_topic,
{"method": "deallocate_floating_ip",
- "args": {"context": None,
- "floating_address": floating_ip_ref['address']}})
+ "args": {"floating_address": floating_ip_ref['address']}})
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
@@ -504,20 +506,20 @@ class CloudController(object):
instance_ref['id'])
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = self._get_network_topic(context)
- rpc.cast(network_topic,
+ rpc.cast(context,
+ network_topic,
{"method": "associate_floating_ip",
- "args": {"context": None,
- "floating_address": floating_ip_ref['address'],
+ "args": {"floating_address": floating_ip_ref['address'],
"fixed_address": fixed_address}})
return {'associateResponse': ["Address associated."]}
def disassociate_address(self, context, public_ip, **kwargs):
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
network_topic = self._get_network_topic(context)
- rpc.cast(network_topic,
+ rpc.cast(context,
+ network_topic,
{"method": "disassociate_floating_ip",
- "args": {"context": None,
- "floating_address": floating_ip_ref['address']}})
+ "args": {"floating_address": floating_ip_ref['address']}})
return {'disassociateResponse': ["Address disassociated."]}
def _get_network_topic(self, context):
@@ -525,10 +527,10 @@ class CloudController(object):
network_ref = db.project_get_network(context, context.project.id)
host = network_ref['host']
if not host:
- host = rpc.call(FLAGS.network_topic,
+ host = rpc.call(context,
+ FLAGS.network_topic,
{"method": "set_network_host",
- "args": {"context": None,
- "project_id": context.project.id}})
+ "args": {"project_id": context.project.id}})
return db.queue_get_for(context, FLAGS.network_topic, host)
def run_instances(self, context, **kwargs):
@@ -619,15 +621,15 @@ class CloudController(object):
# TODO(vish): This probably should be done in the scheduler
# network is setup when host is assigned
network_topic = self._get_network_topic(context)
- rpc.call(network_topic,
+ rpc.call(context,
+ network_topic,
{"method": "setup_fixed_ip",
- "args": {"context": None,
- "address": address}})
+ "args": {"address": address}})
- rpc.cast(FLAGS.scheduler_topic,
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
{"method": "run_instance",
- "args": {"context": None,
- "topic": FLAGS.compute_topic,
+ "args": {"topic": FLAGS.compute_topic,
"instance_id": inst_id}})
logging.debug("Casting to scheduler for %s/%s's instance %s" %
(context.project.name, context.user.name, inst_id))
@@ -658,10 +660,10 @@ class CloudController(object):
# disassociated. We may need to worry about
# checking this later. Perhaps in the scheduler?
network_topic = self._get_network_topic(context)
- rpc.cast(network_topic,
+ rpc.cast(context,
+ network_topic,
{"method": "disassociate_floating_ip",
- "args": {"context": None,
- "floating_address": address}})
+ "args": {"floating_address": address}})
address = db.instance_get_fixed_address(context,
instance_ref['id'])
@@ -674,10 +676,10 @@ class CloudController(object):
host = instance_ref['host']
if host:
- rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host),
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "terminate_instance",
- "args": {"context": None,
- "instance_id": instance_ref['id']}})
+ "args": {"instance_id": instance_ref['id']}})
else:
db.instance_destroy(context, instance_ref['id'])
return True
@@ -695,9 +697,8 @@ class CloudController(object):
if field in kwargs:
changes[field] = kwargs[field]
if changes:
- db_context = {}
- inst = db.instance_get_by_ec2_id(db_context, instance_id)
- db.instance_update(db_context, inst['id'], kwargs)
+ inst = db.instance_get_by_ec2_id(context, instance_id)
+ db.instance_update(context, inst['id'], kwargs)
return True
def delete_volume(self, context, volume_id, **kwargs):
@@ -708,10 +709,10 @@ class CloudController(object):
now = datetime.datetime.utcnow()
db.volume_update(context, volume_ref['id'], {'terminated_at': now})
host = volume_ref['host']
- rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host),
+ rpc.cast(context,
+ db.queue_get_for(context, FLAGS.volume_topic, host),
{"method": "delete_volume",
- "args": {"context": None,
- "volume_id": volume_ref['id']}})
+ "args": {"volume_id": volume_ref['id']}})
return True
def describe_images(self, context, image_id=None, **kwargs):
diff --git a/nova/api/rackspace/context.py b/nova/api/rackspace/context.py
deleted file mode 100644
index 77394615b..000000000
--- a/nova/api/rackspace/context.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-APIRequestContext
-"""
-
-import random
-
-class Project(object):
- def __init__(self, user_id):
- self.id = user_id
-
-class APIRequestContext(object):
- """ This is an adapter class to get around all of the assumptions made in
- the FlatNetworking """
- def __init__(self, user_id):
- self.user_id = user_id
- self.project = Project(user_id)
diff --git a/nova/api/rackspace/servers.py b/nova/api/rackspace/servers.py
index 11efd8aef..866bebb44 100644
--- a/nova/api/rackspace/servers.py
+++ b/nova/api/rackspace/servers.py
@@ -24,9 +24,9 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import wsgi
+from nova import context
from nova.api import cloud
from nova.api.rackspace import _id_translator
-from nova.api.rackspace import context
from nova.api.rackspace import faults
from nova.compute import instance_types
from nova.compute import power_state
@@ -64,8 +64,8 @@ def _entity_list(entities):
def _entity_detail(inst):
""" Maps everything to Rackspace-like attributes for return"""
- power_mapping = {
- power_state.NOSTATE: 'build',
+ power_mapping = {
+ power_state.NOSTATE: 'build',
power_state.RUNNING: 'active',
power_state.BLOCKED: 'active',
power_state.PAUSED: 'suspended',
@@ -75,7 +75,7 @@ def _entity_detail(inst):
}
inst_dict = {}
- mapped_keys = dict(status='state', imageId='image_id',
+ mapped_keys = dict(status='state', imageId='image_id',
flavorId='instance_type', name='server_name', id='id')
for k, v in mapped_keys.iteritems():
@@ -98,7 +98,7 @@ class Controller(wsgi.Controller):
_serialization_metadata = {
'application/xml': {
"attributes": {
- "server": [ "id", "imageId", "name", "flavorId", "hostId",
+ "server": [ "id", "imageId", "name", "flavorId", "hostId",
"status", "progress", "progress" ]
}
}
@@ -164,11 +164,11 @@ class Controller(wsgi.Controller):
inst = self._build_server_instance(req, env)
except Exception, e:
return faults.Fault(exc.HTTPUnprocessableEntity())
-
- rpc.cast(
- FLAGS.compute_topic, {
- "method": "run_instance",
- "args": {"instance_id": inst['id']}})
+ user_id = req.environ['nova.context']['user']['id']
+ rpc.cast(context.RequestContext(user_id, user_id),
+ FLAGS.compute_topic,
+ {"method": "run_instance",
+ "args": {"instance_id": inst['id']}})
return _entity_inst(inst)
def update(self, req, id):
@@ -178,7 +178,7 @@ class Controller(wsgi.Controller):
user_id = req.environ['nova.context']['user']['id']
inst_dict = self._deserialize(req.body, req)
-
+
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
@@ -186,12 +186,12 @@ class Controller(wsgi.Controller):
if not instance or instance.user_id != user_id:
return faults.Fault(exc.HTTPNotFound())
- self.db_driver.instance_update(None, id,
+ self.db_driver.instance_update(None, id,
_filter_params(inst_dict['server']))
return faults.Fault(exc.HTTPNoContent())
def action(self, req, id):
- """ multi-purpose method used to reboot, rebuild, and
+ """ multi-purpose method used to reboot, rebuild, and
resize a server """
input_dict = self._deserialize(req.body, req)
try:
@@ -217,13 +217,13 @@ class Controller(wsgi.Controller):
if v['flavorid'] == flavor_id][0]
image_id = env['server']['imageId']
-
+
img_service, image_id_trans = _image_service()
- opaque_image_id = image_id_trans.to_rs_id(image_id)
+ opaque_image_id = image_id_trans.to_rs_id(image_id)
image = img_service.show(opaque_image_id)
- if not image:
+ if not image:
raise Exception, "Image not found"
inst['server_name'] = env['server']['name']
@@ -259,15 +259,15 @@ class Controller(wsgi.Controller):
ref = self.db_driver.instance_create(None, inst)
inst['id'] = inst_id_trans.to_rs_id(ref.ec2_id)
-
+
# TODO(dietz): this isn't explicitly necessary, but the networking
# calls depend on an object with a project_id property, and therefore
# should be cleaned up later
- api_context = context.APIRequestContext(user_id)
-
+ api_context = context.RequestContext(user_id)
+
inst['mac_address'] = utils.generate_mac()
-
- #TODO(dietz) is this necessary?
+
+ #TODO(dietz) is this necessary?
inst['launch_index'] = 0
inst['hostname'] = ref.ec2_id
@@ -280,20 +280,20 @@ class Controller(wsgi.Controller):
# TODO(vish): This probably should be done in the scheduler
# network is setup when host is assigned
network_topic = self._get_network_topic(user_id)
- rpc.call(network_topic,
+ rpc.call(context.RequestContext(user_id, user_id),
+ network_topic,
{"method": "setup_fixed_ip",
- "args": {"context": None,
- "address": address}})
+ "args": {"address": address}})
return inst
def _get_network_topic(self, user_id):
"""Retrieves the network host for a project"""
- network_ref = self.db_driver.project_get_network(None,
+ network_ref = self.db_driver.project_get_network(None,
user_id)
host = network_ref['host']
if not host:
- host = rpc.call(FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"context": None,
- "project_id": user_id}})
+ host = rpc.call(context.RequestContext(user_id, user_id),
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"project_id": user_id}})
return self.db_driver.queue_get_for(None, FLAGS.network_topic, host)
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 0bc12c80f..9d0c5c95c 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -28,6 +28,7 @@ import tempfile
import uuid
import zipfile
+from nova import context
from nova import crypto
from nova import db
from nova import exception
@@ -454,7 +455,7 @@ class AuthManager(object):
return [Project(**project_dict) for project_dict in project_list]
def create_project(self, name, manager_user, description=None,
- member_users=None, context=None):
+ member_users=None):
"""Create a project
@type name: str
@@ -485,7 +486,8 @@ class AuthManager(object):
if project_dict:
project = Project(**project_dict)
try:
- self.network_manager.allocate_network(context,
+ ctxt = context.get_admin_context()
+ self.network_manager.allocate_network(ctxt,
project.id)
except:
drv.delete_project(project.id)
@@ -537,7 +539,7 @@ class AuthManager(object):
Project.safe_id(project))
@staticmethod
- def get_project_vpn_data(project, context=None):
+ def get_project_vpn_data(project):
"""Gets vpn ip and port for project
@type project: Project or project_id
@@ -548,7 +550,7 @@ class AuthManager(object):
not been allocated for user.
"""
- network_ref = db.project_get_network(context,
+ network_ref = db.project_get_network(context.get_admin_context(),
Project.safe_id(project))
if not network_ref['vpn_public_port']:
@@ -556,12 +558,13 @@ class AuthManager(object):
return (network_ref['vpn_public_address'],
network_ref['vpn_public_port'])
- def delete_project(self, project, context=None):
+ def delete_project(self, project):
"""Deletes a project"""
try:
- network_ref = db.project_get_network(context,
+ ctxt = context.get_admin_context()
+ network_ref = db.project_get_network(ctxt,
Project.safe_id(project))
- db.network_destroy(context, network_ref['id'])
+ db.network_destroy(ctxt, network_ref['id'])
except:
logging.exception('Could not destroy network for %s',
project)
@@ -626,7 +629,8 @@ class AuthManager(object):
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
- db.key_pair_destroy_all_by_user(None, uid)
+ db.key_pair_destroy_all_by_user(context.get_admin_context(),
+ uid)
with self.driver() as drv:
drv.delete_user(uid)
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 706a175d9..4fc2c85cb 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -28,13 +28,13 @@ import os
import tempfile
import zipfile
+from nova import context
from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
# TODO(eday): Eventually changes these to something not ec2-specific
from nova.api.ec2 import cloud
-from nova.api.ec2 import context
FLAGS = flags.FLAGS
@@ -62,7 +62,7 @@ class CloudPipe(object):
key_name = self.setup_key_pair(project.project_manager_id, project_id)
zippy = open(zippath, "r")
- context = context.APIRequestContext(user=project.project_manager, project=project)
+ context = context.RequestContext(user=project.project_manager, project=project)
reservation = self.controller.run_instances(context,
# run instances expects encoded userdata, it is decoded in the get_metadata_call
diff --git a/nova/context.py b/nova/context.py
new file mode 100644
index 000000000..3f9de519d
--- /dev/null
+++ b/nova/context.py
@@ -0,0 +1,106 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+RequestContext: context for requests that persist through all of nova.
+"""
+
+import datetime
+import random
+
+from nova import utils
+
+class RequestContext(object):
+ def __init__(self, user, project, is_admin=None, read_deleted=False,
+ remote_address=None, timestamp=None, request_id=None):
+ if hasattr(user, 'id'):
+ self._user = user
+ self.user_id = user.id
+ else:
+ self._user = None
+ self.user_id = user
+ if hasattr(project, 'id'):
+ self._project = project
+ self.project_id = project.id
+ else:
+ self._project = None
+ self.project_id = project
+ if is_admin is None:
+ if not user:
+ user = self.user
+ self.is_admin = user.is_admin()
+ else:
+ self.is_admin = is_admin
+ self.read_deleted = read_deleted
+ self.remote_address = remote_address
+ if not timestamp:
+ timestamp = datetime.datetime.utcnow()
+ if isinstance(timestamp, str):
+ timestamp = utils.parse_isotime(timestamp)
+ self.timestamp = timestamp
+ if not request_id:
+ request_id = ''.join(
+ [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
+ for x in xrange(20)]
+ )
+ self.request_id = request_id
+
+ @property
+ def user(self):
+ # NOTE(vish): Delay import of manager, so that we can import this
+ # file from manager.
+ from nova.auth import manager
+ if not self._user:
+ self._user = manager.AuthManager().get_user(self.user_id)
+ return self._user
+
+ @property
+ def project(self):
+ # NOTE(vish): Delay import of manager, so that we can import this
+ # file from manager.
+ from nova.auth import manager
+ if not self._project:
+ self._project = manager.AuthManager().get_project(self.project_id)
+ return self._project
+
+ def to_dict(self):
+ return {'user': self.user_id,
+ 'project': self.project_id,
+ 'is_admin': self.is_admin,
+ 'read_deleted': self.read_deleted,
+ 'remote_address': self.remote_address,
+ 'timestamp': utils.isotime(),
+ 'request_id': self.request_id}
+
+ @classmethod
+ def from_dict(cls, values):
+ return cls(**values)
+
+ def admin(self, read_deleted=False):
+ """Return a version of this context with admin flag set"""
+ return RequestContext(self.user_id,
+ self.project_id,
+ True,
+ read_deleted,
+ self.remote_address,
+ self.timestamp,
+ self.request_id)
+
+
+def get_admin_context(read_deleted=False):
+ return RequestContext(None, None, True, read_deleted)
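A short usage sketch of the class this commit adds, assuming the nova package is importable; it exercises the to_dict()/from_dict() round-trip that rpc relies on and the admin() escalation the network manager uses. The ids and address are illustrative:

    from nova import context

    # Build a context from plain ids; the user/project objects are only
    # lazy-loaded through AuthManager when .user/.project are read.
    ctxt = context.RequestContext(user='fake-user', project='fake-project',
                                  is_admin=False, remote_address='127.0.0.1')

    # to_dict()/from_dict() is the round-trip rpc uses to ship a context
    # inside a message body.
    wire = ctxt.to_dict()
    restored = context.RequestContext.from_dict(wire)
    assert restored.project_id == 'fake-project'

    # admin() returns an elevated copy, e.g. for fixed_ip_associate_pool().
    elevated = ctxt.admin(read_deleted=True)
    assert elevated.is_admin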
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 7f72f66b9..0fef49090 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -40,6 +40,7 @@ def is_admin_context(context):
if not context:
warnings.warn('Use of empty request context is deprecated',
DeprecationWarning)
+ raise Exception('die')
return True
return context.is_admin
@@ -321,7 +322,7 @@ def floating_ip_destroy(context, address):
session = get_session()
with session.begin():
# TODO(devcamcar): Ensure address belongs to user.
- floating_ip_ref = get_floating_ip_by_address(context,
+ floating_ip_ref = floating_ip_get_by_address(context,
address,
session=session)
floating_ip_ref.delete(session=session)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index ef1d01138..25956b267 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -92,7 +92,7 @@ class NetworkManager(manager.Manager):
# TODO(vish): can we minimize db access by just getting the
# id here instead of the ref?
network_id = network_ref['id']
- host = self.db.network_set_host(None,
+ host = self.db.network_set_host(context,
network_id,
self.host)
self._on_set_network_host(context, network_id)
@@ -180,7 +180,7 @@ class FlatManager(NetworkManager):
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool"""
network_ref = self.db.project_get_network(context, context.project.id)
- address = self.db.fixed_ip_associate_pool(context,
+ address = self.db.fixed_ip_associate_pool(context.admin(),
network_ref['id'],
instance_id)
self.db.fixed_ip_update(context, address, {'allocated': True})
@@ -249,7 +249,7 @@ class VlanManager(NetworkManager):
address = network_ref['vpn_private_address']
self.db.fixed_ip_associate(context, address, instance_id)
else:
- address = self.db.fixed_ip_associate_pool(None,
+ address = self.db.fixed_ip_associate_pool(context.admin(),
network_ref['id'],
instance_id)
self.db.fixed_ip_update(context, address, {'allocated': True})
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index dfee64aca..b93e92fe6 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -52,10 +52,10 @@ from twisted.web import resource
from twisted.web import server
from twisted.web import static
+from nova import context
from nova import exception
from nova import flags
from nova.auth import manager
-from nova.api.ec2 import context
from nova.objectstore import bucket
from nova.objectstore import image
@@ -131,7 +131,7 @@ def get_context(request):
request.uri,
headers=request.getAllHeaders(),
check_type='s3')
- return context.APIRequestContext(user, project)
+ return context.RequestContext(user, project)
except exception.Error as ex:
logging.debug("Authentication Failure: %s", ex)
raise exception.NotAuthorized
@@ -144,7 +144,7 @@ class ErrorHandlingResource(resource.Resource):
# plugged in to the right place in twisted...
# This doesn't look like it's the right place
# (consider exceptions in getChild; or after
- # NOT_DONE_YET is returned
+ # NOT_DONE_YET is returned
def render(self, request):
"""Renders the response as XML"""
try:
@@ -255,7 +255,7 @@ class ObjectResource(ErrorHandlingResource):
def render_GET(self, request):
"""Returns the object
-
+
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
@@ -273,7 +273,7 @@ class ObjectResource(ErrorHandlingResource):
def render_PUT(self, request):
"""Modifies/inserts the object and returns a result code
-
+
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
@@ -291,7 +291,7 @@ class ObjectResource(ErrorHandlingResource):
def render_DELETE(self, request):
"""Deletes the object and returns a result code
-
+
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
diff --git a/nova/rpc.py b/nova/rpc.py
index fe52ad35f..26eff9c55 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -34,7 +34,7 @@ from twisted.internet import task
from nova import exception
from nova import fakerabbit
from nova import flags
-
+from nova import context
FLAGS = flags.FLAGS
@@ -151,6 +151,8 @@ class AdapterConsumer(TopicConsumer):
"""
LOG.debug('received %s' % (message_data))
msg_id = message_data.pop('_msg_id', None)
+ dict_context = message_data.pop('_context')
+ ctxt = context.RequestContext.from_dict(dict_context)
method = message_data.get('method')
args = message_data.get('args', {})
@@ -168,7 +170,7 @@ class AdapterConsumer(TopicConsumer):
node_args = dict((str(k), v) for k, v in args.iteritems())
# NOTE(vish): magic is fun!
# pylint: disable-msg=W0142
- d = defer.maybeDeferred(node_func, **node_args)
+ d = defer.maybeDeferred(node_func, context=ctxt, **node_args)
if msg_id:
d.addCallback(lambda rval: msg_reply(msg_id, rval, None))
d.addErrback(lambda e: msg_reply(msg_id, None, e))
@@ -247,12 +249,13 @@ class RemoteError(exception.Error):
traceback))
-def call(topic, msg):
+def call(context, topic, msg):
"""Sends a message on a topic and wait for a response"""
LOG.debug("Making asynchronous call...")
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug("MSG_ID is %s" % (msg_id))
+ msg.update({'_context': context.to_dict()})
class WaitMessage(object):
@@ -282,12 +285,13 @@ def call(topic, msg):
return wait_msg.result
-def call_twisted(topic, msg):
+def call_twisted(context, topic, msg):
"""Sends a message on a topic and wait for a response"""
LOG.debug("Making asynchronous call...")
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug("MSG_ID is %s" % (msg_id))
+ msg.update({'_context': context.to_dict()})
conn = Connection.instance()
d = defer.Deferred()
@@ -313,9 +317,10 @@ def call_twisted(topic, msg):
return d
-def cast(topic, msg):
+def cast(context, topic, msg):
"""Sends a message on a topic without waiting for a response"""
LOG.debug("Making asynchronous cast...")
+ msg.update({'_context': context.to_dict()})
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
publisher.send(msg)
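Taken together with the consumer change above, every message body now carries the serialized context under '_context', which AdapterConsumer pops and rehydrates before dispatch. A minimal sketch of that round-trip, assuming nova is importable (the method and payload are illustrative):

    from nova import context

    ctxt = context.get_admin_context()
    msg = {"method": "setup_fixed_ip",
           "args": {"address": "10.0.0.5"}}   # illustrative payload
    msg.update({'_context': ctxt.to_dict()})  # what cast()/call_twisted() add

    # What AdapterConsumer does on receipt, mirroring the hunk above:
    dict_context = msg.pop('_context')
    rehydrated = context.RequestContext.from_dict(dict_context)
    assert rehydrated.is_admin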
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 0ad7ca86b..6a933fb37 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -59,8 +59,8 @@ class SchedulerManager(manager.Manager):
except AttributeError:
host = self.driver.schedule(context, topic, *args, **kwargs)
- kwargs.update({"context": None})
- rpc.cast(db.queue_get_for(context, topic, host),
+ rpc.cast(context,
+ db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
logging.debug("Casting to %s %s for %s", topic, host, method)
diff --git a/nova/service.py b/nova/service.py
index a6c186896..609df81d1 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -28,6 +28,7 @@ from twisted.internet import defer
from twisted.internet import task
from twisted.application import service
+from nova import context
from nova import db
from nova import exception
from nova import flags
@@ -57,20 +58,22 @@ class Service(object, service.Service):
self.manager.init_host()
self.model_disconnected = False
super(Service, self).__init__(*args, **kwargs)
+ ctxt = context.get_admin_context()
try:
- service_ref = db.service_get_by_args(None,
- self.host,
- self.binary)
+ service_ref = db.service_get_by_args(ctxt,
+ self.host,
+ self.binary)
self.service_id = service_ref['id']
except exception.NotFound:
- self._create_service_ref()
+ self._create_service_ref(ctxt)
- def _create_service_ref(self):
- service_ref = db.service_create(None, {'host': self.host,
- 'binary': self.binary,
- 'topic': self.topic,
- 'report_count': 0})
+ def _create_service_ref(self, context):
+ service_ref = db.service_create(context,
+ {'host': self.host,
+ 'binary': self.binary,
+ 'topic': self.topic,
+ 'report_count': 0})
self.service_id = service_ref['id']
def __getattr__(self, key):
@@ -136,31 +139,32 @@ class Service(object, service.Service):
service_obj.setServiceParent(application)
return application
- def kill(self, context=None):
+ def kill(self):
"""Destroy the service object in the datastore"""
try:
- db.service_destroy(context, self.service_id)
+ db.service_destroy(context.get_admin_context(), self.service_id)
except exception.NotFound:
logging.warn("Service killed that has no database entry")
@defer.inlineCallbacks
- def periodic_tasks(self, context=None):
+ def periodic_tasks(self):
"""Tasks to be run at a periodic interval"""
- yield self.manager.periodic_tasks(context)
+ yield self.manager.periodic_tasks(context.get_admin_context())
@defer.inlineCallbacks
- def report_state(self, context=None):
+ def report_state(self):
"""Update the state of this service in the datastore."""
+ ctxt = context.get_admin_context()
try:
try:
- service_ref = db.service_get(context, self.service_id)
+ service_ref = db.service_get(ctxt, self.service_id)
except exception.NotFound:
logging.debug("The service database object disappeared, "
"Recreating it.")
- self._create_service_ref()
- service_ref = db.service_get(context, self.service_id)
+ self._create_service_ref(ctxt)
+ service_ref = db.service_get(ctxt, self.service_id)
- db.service_update(context,
+ db.service_update(ctxt,
self.service_id,
{'report_count': service_ref['report_count'] + 1})
diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py
index 4b40ffd0a..8167259c4 100644
--- a/nova/tests/access_unittest.py
+++ b/nova/tests/access_unittest.py
@@ -20,6 +20,7 @@ import unittest
import logging
import webob
+from nova import context
from nova import exception
from nova import flags
from nova import test
@@ -35,44 +36,25 @@ class AccessTestCase(test.TrialTestCase):
def setUp(self):
super(AccessTestCase, self).setUp()
um = manager.AuthManager()
+ self.context = context.get_admin_context()
# Make test users
- try:
- self.testadmin = um.create_user('testadmin')
- except Exception, err:
- logging.error(str(err))
- try:
- self.testpmsys = um.create_user('testpmsys')
- except: pass
- try:
- self.testnet = um.create_user('testnet')
- except: pass
- try:
- self.testsys = um.create_user('testsys')
- except: pass
+ self.testadmin = um.create_user('testadmin')
+ self.testpmsys = um.create_user('testpmsys')
+ self.testnet = um.create_user('testnet')
+ self.testsys = um.create_user('testsys')
# Assign some rules
- try:
- um.add_role('testadmin', 'cloudadmin')
- except: pass
- try:
- um.add_role('testpmsys', 'sysadmin')
- except: pass
- try:
- um.add_role('testnet', 'netadmin')
- except: pass
- try:
- um.add_role('testsys', 'sysadmin')
- except: pass
+ um.add_role('testadmin', 'cloudadmin')
+ um.add_role('testpmsys', 'sysadmin')
+ um.add_role('testnet', 'netadmin')
+ um.add_role('testsys', 'sysadmin')
# Make a test project
- try:
- self.project = um.create_project('testproj', 'testpmsys', 'a test project', ['testpmsys', 'testnet', 'testsys'])
- except: pass
- try:
- self.project.add_role(self.testnet, 'netadmin')
- except: pass
- try:
- self.project.add_role(self.testsys, 'sysadmin')
- except: pass
+ self.project = um.create_project('testproj',
+ 'testpmsys',
+ 'a test project',
+ ['testpmsys', 'testnet', 'testsys'])
+ self.project.add_role(self.testnet, 'netadmin')
+ self.project.add_role(self.testsys, 'sysadmin')
#user is set in each test
def noopWSGIApp(environ, start_response):
start_response('200 OK', [])
@@ -97,10 +79,8 @@ class AccessTestCase(test.TrialTestCase):
super(AccessTestCase, self).tearDown()
def response_status(self, user, methodName):
- context = Context()
- context.project = self.project
- context.user = user
- environ = {'ec2.context' : context,
+ ctxt = context.RequestContext(user, self.project)
+ environ = {'ec2.context' : ctxt,
'ec2.controller': 'some string',
'ec2.action': methodName}
req = webob.Request.blank('/', environ)
diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py
index c040cdad3..7c745ca41 100644
--- a/nova/tests/api_unittest.py
+++ b/nova/tests/api_unittest.py
@@ -25,6 +25,7 @@ import random
import StringIO
import webob
+from nova import context
from nova import flags
from nova import test
from nova import api
@@ -131,7 +132,7 @@ class ApiEc2TestCase(test.BaseTestCase):
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')
# NOTE(vish): create depends on pool, so call helper directly
- cloud._gen_key(None, user.id, keyname)
+ cloud._gen_key(context.get_admin_context(), user.id, keyname)
rv = self.ec2.get_all_key_pairs()
results = [k for k in rv if k.name == keyname]
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index ae7dea1db..8125f4a78 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -29,6 +29,7 @@ from twisted.internet import defer
import unittest
from xml.etree import ElementTree
+from nova import context
from nova import crypto
from nova import db
from nova import flags
@@ -37,7 +38,6 @@ from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import power_state
-from nova.api.ec2 import context
from nova.api.ec2 import cloud
from nova.objectstore import image
@@ -72,7 +72,7 @@ class CloudTestCase(test.TrialTestCase):
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
- self.context = context.APIRequestContext(user=self.user,
+ self.context = context.RequestContext(user=self.user,
project=self.project)
def tearDown(self):
@@ -235,33 +235,33 @@ class CloudTestCase(test.TrialTestCase):
self.assertEqual('', img.metadata['description'])
def test_update_of_instance_display_fields(self):
- inst = db.instance_create({}, {})
+ inst = db.instance_create(self.context, {})
self.cloud.update_instance(self.context, inst['ec2_id'],
display_name='c00l 1m4g3')
- inst = db.instance_get({}, inst['id'])
+ inst = db.instance_get(self.context, inst['id'])
self.assertEqual('c00l 1m4g3', inst['display_name'])
- db.instance_destroy({}, inst['id'])
+ db.instance_destroy(self.context, inst['id'])
def test_update_of_instance_wont_update_private_fields(self):
- inst = db.instance_create({}, {})
+ inst = db.instance_create(self.context, {})
self.cloud.update_instance(self.context, inst['id'],
mac_address='DE:AD:BE:EF')
- inst = db.instance_get({}, inst['id'])
+ inst = db.instance_get(self.context, inst['id'])
self.assertEqual(None, inst['mac_address'])
- db.instance_destroy({}, inst['id'])
+ db.instance_destroy(self.context, inst['id'])
def test_update_of_volume_display_fields(self):
- vol = db.volume_create({}, {})
+ vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context, vol['id'],
display_name='c00l v0lum3')
- vol = db.volume_get({}, vol['id'])
+ vol = db.volume_get(self.context, vol['id'])
self.assertEqual('c00l v0lum3', vol['display_name'])
- db.volume_destroy({}, vol['id'])
+ db.volume_destroy(self.context, vol['id'])
def test_update_of_volume_wont_update_private_fields(self):
- vol = db.volume_create({}, {})
+ vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context, vol['id'],
mountpoint='/not/here')
- vol = db.volume_get({}, vol['id'])
+ vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
- db.volume_destroy({}, vol['id'])
+ db.volume_destroy(self.context, vol['id'])
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index 1e2bb113b..ec24b2537 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -24,13 +24,13 @@ import logging
from twisted.internet import defer
+from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
-from nova.api import context
FLAGS = flags.FLAGS
@@ -45,7 +45,7 @@ class ComputeTestCase(test.TrialTestCase):
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = None
+ self.context = context.get_admin_context()
def tearDown(self): # pylint: disable-msg=C0103
self.manager.delete_user(self.user)
@@ -72,13 +72,13 @@ class ComputeTestCase(test.TrialTestCase):
yield self.compute.run_instance(self.context, instance_id)
- instances = db.instance_get_all(None)
+ instances = db.instance_get_all(context.get_admin_context())
logging.info("Running instances: %s", instances)
self.assertEqual(len(instances), 1)
yield self.compute.terminate_instance(self.context, instance_id)
- instances = db.instance_get_all(None)
+ instances = db.instance_get_all(context.get_admin_context())
logging.info("After terminating instances: %s", instances)
self.assertEqual(len(instances), 0)
@@ -96,8 +96,7 @@ class ComputeTestCase(test.TrialTestCase):
self.assertEqual(instance_ref['deleted_at'], None)
terminate = datetime.datetime.utcnow()
yield self.compute.terminate_instance(self.context, instance_id)
- self.context = context.get_admin_context(user=self.user,
- read_deleted=True)
+ self.context = self.context.admin(True)
instance_ref = db.instance_get(self.context, instance_id)
self.assert_(instance_ref['launched_at'] < terminate)
self.assert_(instance_ref['deleted_at'] > terminate)
diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py
index 5370966d2..85fc2e2a1 100644
--- a/nova/tests/network_unittest.py
+++ b/nova/tests/network_unittest.py
@@ -22,13 +22,13 @@ import IPy
import os
import logging
+from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
-from nova.api.ec2 import context
FLAGS = flags.FLAGS
@@ -49,16 +49,14 @@ class NetworkTestCase(test.TrialTestCase):
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
self.network = utils.import_object(FLAGS.network_manager)
- self.context = context.APIRequestContext(project=None, user=self.user)
+ self.context = context.RequestContext(project=None, user=self.user)
for i in range(5):
name = 'project%s' % i
self.projects.append(self.manager.create_project(name,
'netuser',
name))
# create the necessary network data for the project
- user_context = context.APIRequestContext(project=self.projects[i],
- user=self.user)
- self.network.set_network_host(user_context, self.projects[i].id)
+ self.network.set_network_host(context.get_admin_context(), self.projects[i].id)
instance_ref = self._create_instance(0)
self.instance_id = instance_ref['id']
instance_ref = self._create_instance(1)
@@ -68,8 +66,8 @@ class NetworkTestCase(test.TrialTestCase):
super(NetworkTestCase, self).tearDown()
# TODO(termie): this should really be instantiating clean datastores
# in between runs, one failure kills all the tests
- db.instance_destroy(None, self.instance_id)
- db.instance_destroy(None, self.instance2_id)
+ db.instance_destroy(context.get_admin_context(), self.instance_id)
+ db.instance_destroy(context.get_admin_context(), self.instance2_id)
for project in self.projects:
self.manager.delete_project(project)
self.manager.delete_user(self.user)
@@ -78,7 +76,7 @@ class NetworkTestCase(test.TrialTestCase):
if not mac:
mac = utils.generate_mac()
project = self.projects[project_num]
- self.context.project = project
+ self.context._project = project
return db.instance_create(self.context,
{'project_id': project.id,
'mac_address': mac})
@@ -87,34 +85,34 @@ class NetworkTestCase(test.TrialTestCase):
"""Create an address in given project num"""
if instance_id is None:
instance_id = self.instance_id
- self.context.project = self.projects[project_num]
+ self.context._project = self.projects[project_num]
return self.network.allocate_fixed_ip(self.context, instance_id)
def _deallocate_address(self, project_num, address):
- self.context.project = self.projects[project_num]
+ self.context._project = self.projects[project_num]
self.network.deallocate_fixed_ip(self.context, address)
def test_public_network_association(self):
"""Makes sure that we can allocaate a public ip"""
# TODO(vish): better way of adding floating ips
- self.context.project = self.projects[0]
+ self.context._project = self.projects[0]
pubnet = IPy.IP(flags.FLAGS.public_range)
address = str(pubnet[0])
try:
- db.floating_ip_get_by_address(None, address)
+ db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
- db.floating_ip_create(None, {'address': address,
+ db.floating_ip_create(context.get_admin_context(), {'address': address,
'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.projects[0].id)
fix_addr = self._create_address(0)
self.assertEqual(float_addr, str(pubnet[0]))
self.network.associate_floating_ip(self.context, float_addr, fix_addr)
- address = db.instance_get_floating_address(None, self.instance_id)
+ address = db.instance_get_floating_address(context.get_admin_context(), self.instance_id)
self.assertEqual(address, float_addr)
self.network.disassociate_floating_ip(self.context, float_addr)
- address = db.instance_get_floating_address(None, self.instance_id)
+ address = db.instance_get_floating_address(context.get_admin_context(), self.instance_id)
self.assertEqual(address, None)
self.network.deallocate_floating_ip(self.context, float_addr)
self.network.deallocate_fixed_ip(self.context, fix_addr)
@@ -175,7 +173,7 @@ class NetworkTestCase(test.TrialTestCase):
lease_ip(address)
lease_ip(address2)
lease_ip(address3)
- self.context.project = self.projects[i]
+ self.context._project = self.projects[i]
self.assertFalse(is_allocated_in_project(address,
self.projects[0].id))
self.assertFalse(is_allocated_in_project(address2,
@@ -189,7 +187,7 @@ class NetworkTestCase(test.TrialTestCase):
release_ip(address2)
release_ip(address3)
for instance_id in instance_ids:
- db.instance_destroy(None, instance_id)
+ db.instance_destroy(context.get_admin_context(), instance_id)
release_ip(first)
self._deallocate_address(0, first)
@@ -203,7 +201,7 @@ class NetworkTestCase(test.TrialTestCase):
def test_too_many_networks(self):
"""Ensure error is raised if we run out of networks"""
projects = []
- networks_left = FLAGS.num_networks - db.network_count(None)
+ networks_left = FLAGS.num_networks - db.network_count(context.get_admin_context())
for i in range(networks_left):
project = self.manager.create_project('many%s' % i, self.user)
projects.append(project)
@@ -236,18 +234,18 @@ class NetworkTestCase(test.TrialTestCase):
There are ips reserved at the bottom and top of the range.
services (network, gateway, CloudPipe, broadcast)
"""
- network = db.project_get_network(None, self.projects[0].id)
+ network = db.project_get_network(context.get_admin_context(), self.projects[0].id)
net_size = flags.FLAGS.network_size
- total_ips = (db.network_count_available_ips(None, network['id']) +
- db.network_count_reserved_ips(None, network['id']) +
- db.network_count_allocated_ips(None, network['id']))
+ total_ips = (db.network_count_available_ips(context.get_admin_context(), network['id']) +
+ db.network_count_reserved_ips(context.get_admin_context(), network['id']) +
+ db.network_count_allocated_ips(context.get_admin_context(), network['id']))
self.assertEqual(total_ips, net_size)
def test_too_many_addresses(self):
"""Test for a NoMoreAddresses exception when all fixed ips are used.
"""
- network = db.project_get_network(None, self.projects[0].id)
- num_available_ips = db.network_count_available_ips(None,
+ network = db.project_get_network(context.get_admin_context(), self.projects[0].id)
+ num_available_ips = db.network_count_available_ips(context.get_admin_context(),
network['id'])
addresses = []
instance_ids = []
@@ -258,7 +256,7 @@ class NetworkTestCase(test.TrialTestCase):
addresses.append(address)
lease_ip(address)
- self.assertEqual(db.network_count_available_ips(None,
+ self.assertEqual(db.network_count_available_ips(context.get_admin_context(),
network['id']), 0)
self.assertRaises(db.NoMoreAddresses,
self.network.allocate_fixed_ip,
@@ -268,17 +266,17 @@ class NetworkTestCase(test.TrialTestCase):
for i in range(num_available_ips):
self.network.deallocate_fixed_ip(self.context, addresses[i])
release_ip(addresses[i])
- db.instance_destroy(None, instance_ids[i])
- self.assertEqual(db.network_count_available_ips(None,
+ db.instance_destroy(context.get_admin_context(), instance_ids[i])
+ self.assertEqual(db.network_count_available_ips(context.get_admin_context(),
network['id']),
num_available_ips)
def is_allocated_in_project(address, project_id):
"""Returns true if address is in specified project"""
- project_net = db.project_get_network(None, project_id)
- network = db.fixed_ip_get_network(None, address)
- instance = db.fixed_ip_get_instance(None, address)
+ project_net = db.project_get_network(context.get_admin_context(), project_id)
+ network = db.fixed_ip_get_network(context.get_admin_context(), address)
+ instance = db.fixed_ip_get_instance(context.get_admin_context(), address)
# instance exists until release
return instance is not None and network['id'] == project_net['id']
@@ -290,8 +288,8 @@ def binpath(script):
def lease_ip(private_ip):
"""Run add command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(None, private_ip)
- instance_ref = db.fixed_ip_get_instance(None, private_ip)
+ network_ref = db.fixed_ip_get_network(context.get_admin_context(), private_ip)
+ instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip)
cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
instance_ref['mac_address'],
private_ip)
@@ -304,8 +302,8 @@ def lease_ip(private_ip):
def release_ip(private_ip):
"""Run del command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(None, private_ip)
- instance_ref = db.fixed_ip_get_instance(None, private_ip)
+ network_ref = db.fixed_ip_get_network(context.get_admin_context(), private_ip)
+ instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), private_ip)
cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
instance_ref['mac_address'],
private_ip)
diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py
index 370ccd506..72e44bf52 100644
--- a/nova/tests/quota_unittest.py
+++ b/nova/tests/quota_unittest.py
@@ -18,6 +18,7 @@
import logging
+from nova import context
from nova import db
from nova import exception
from nova import flags
@@ -26,7 +27,6 @@ from nova import test
from nova import utils
from nova.auth import manager
from nova.api.ec2 import cloud
-from nova.api.ec2 import context
FLAGS = flags.FLAGS
@@ -48,8 +48,8 @@ class QuotaTestCase(test.TrialTestCase):
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
self.network = utils.import_object(FLAGS.network_manager)
- self.context = context.APIRequestContext(project=self.project,
- user=self.user)
+ self.context = context.RequestContext(project=self.project,
+ user=self.user)
def tearDown(self): # pylint: disable-msg=C0103
manager.AuthManager().delete_project(self.project)
@@ -94,7 +94,7 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
- self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+ self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
@@ -106,7 +106,7 @@ class QuotaTestCase(test.TrialTestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
- self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+ self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
@@ -139,9 +139,9 @@ class QuotaTestCase(test.TrialTestCase):
def test_too_many_addresses(self):
address = '192.168.0.100'
try:
- db.floating_ip_get_by_address(None, address)
+ db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
- db.floating_ip_create(None, {'address': address,
+ db.floating_ip_create(context.get_admin_context(), {'address': address,
'host': FLAGS.host})
float_addr = self.network.allocate_floating_ip(self.context,
self.project.id)
diff --git a/nova/tests/rpc_unittest.py b/nova/tests/rpc_unittest.py
index 9652841f2..5d2bb1046 100644
--- a/nova/tests/rpc_unittest.py
+++ b/nova/tests/rpc_unittest.py
@@ -22,6 +22,7 @@ import logging
from twisted.internet import defer
+from nova import context
from nova import flags
from nova import rpc
from nova import test
@@ -40,14 +41,24 @@ class RpcTestCase(test.TrialTestCase):
topic='test',
proxy=self.receiver)
self.consumer.attach_to_twisted()
+        self.context = context.get_admin_context()
def test_call_succeed(self):
"""Get a value through rpc call"""
value = 42
- result = yield rpc.call_twisted('test', {"method": "echo",
+ result = yield rpc.call_twisted(self.context,
+ 'test', {"method": "echo",
"args": {"value": value}})
self.assertEqual(value, result)
+ def test_context_passed(self):
+ """Makes sure a context is passed through rpc call"""
+ value = 42
+ result = yield rpc.call_twisted(self.context,
+ 'test', {"method": "context",
+ "args": {"value": value}})
+ self.assertEqual(self.context.to_dict(), result)
+
def test_call_exception(self):
"""Test that exception gets passed back properly
@@ -56,11 +67,13 @@ class RpcTestCase(test.TrialTestCase):
to an int in the test.
"""
value = 42
- self.assertFailure(rpc.call_twisted('test', {"method": "fail",
+ self.assertFailure(rpc.call_twisted(self.context,
+ 'test', {"method": "fail",
"args": {"value": value}}),
rpc.RemoteError)
try:
- yield rpc.call_twisted('test', {"method": "fail",
+ yield rpc.call_twisted(self.context,
+ 'test', {"method": "fail",
"args": {"value": value}})
self.fail("should have thrown rpc.RemoteError")
except rpc.RemoteError as exc:
@@ -73,12 +86,19 @@ class TestReceiver(object):
Uses static methods because we aren't actually storing any state"""
@staticmethod
- def echo(value):
+ def echo(context, value):
"""Simply returns whatever value is sent in"""
logging.debug("Received %s", value)
return defer.succeed(value)
@staticmethod
- def fail(value):
+ def context(context, value):
+ """Returns dictionary version of context"""
+ logging.debug("Received %s", context)
+ return defer.succeed(context.to_dict())
+
+ @staticmethod
+ def fail(context, value):
"""Raises an exception with the value sent in"""
raise Exception(value)
+
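
test_context_passed pins down the new rpc contract: the caller's context is serialized into the message and rebuilt on the receiving side before being handed to the proxied method as its first argument. A synchronous sketch of that round trip, with all names illustrative rather than nova's actual rpc internals:

    class Context(object):
        def __init__(self, user_id, is_admin):
            self.user_id = user_id
            self.is_admin = is_admin

        def to_dict(self):
            return {'user_id': self.user_id, 'is_admin': self.is_admin}

    def call(context, msg, proxy):
        # Caller side: fold the context into the message.
        msg = dict(msg, _context=context.to_dict())
        # Receiver side: rebuild it and pass it as the first argument.
        ctxt = Context(**msg.pop('_context'))
        return getattr(proxy, msg['method'])(ctxt, **msg['args'])

    class Receiver(object):
        @staticmethod
        def context(context, value):
            return context.to_dict()

    ctxt = Context('admin', True)
    assert call(ctxt, {'method': 'context', 'args': {'value': 42}},
                Receiver) == ctxt.to_dict()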
diff --git a/nova/tests/scheduler_unittest.py b/nova/tests/scheduler_unittest.py
index fde30f81e..027b62987 100644
--- a/nova/tests/scheduler_unittest.py
+++ b/nova/tests/scheduler_unittest.py
@@ -19,6 +19,7 @@
Tests For Scheduler
"""
+from nova import context
from nova import db
from nova import flags
from nova import service
@@ -50,22 +51,24 @@ class SchedulerTestCase(test.TrialTestCase):
def test_fallback(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- rpc.cast('topic.fallback_host',
+ ctxt = context.get_admin_context()
+ rpc.cast(ctxt,
+ 'topic.fallback_host',
{'method': 'noexist',
- 'args': {'context': None,
- 'num': 7}})
+ 'args': {'num': 7}})
self.mox.ReplayAll()
- scheduler.noexist(None, 'topic', num=7)
+ scheduler.noexist(ctxt, 'topic', num=7)
def test_named_method(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
- rpc.cast('topic.named_host',
+ ctxt = context.get_admin_context()
+ rpc.cast(ctxt,
+ 'topic.named_host',
{'method': 'named_method',
- 'args': {'context': None,
- 'num': 7}})
+ 'args': {'num': 7}})
self.mox.ReplayAll()
- scheduler.named_method(None, 'topic', num=7)
+ scheduler.named_method(ctxt, 'topic', num=7)
class SimpleDriverTestCase(test.TrialTestCase):
@@ -78,11 +81,10 @@ class SimpleDriverTestCase(test.TrialTestCase):
volume_driver='nova.volume.driver.FakeAOEDriver',
scheduler_driver='nova.scheduler.simple.SimpleScheduler')
self.scheduler = manager.SchedulerManager()
- self.context = None
self.manager = auth_manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = None
+ self.context = context.get_admin_context()
def tearDown(self): # pylint: disable-msg=C0103
self.manager.delete_user(self.user)
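
With the context promoted to a first-class rpc.cast parameter, the scheduler no longer smuggles it inside the args dict, which is exactly what the rewritten expectations assert. The payload shapes, side by side (illustrative literals):

    old_msg = {'method': 'noexist',
               'args': {'context': None, 'num': 7}}
    new_msg = {'method': 'noexist',
               'args': {'num': 7}}
    # rpc.cast(ctxt, 'topic.fallback_host', new_msg)
    assert 'context' not in new_msg['args']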
diff --git a/nova/tests/service_unittest.py b/nova/tests/service_unittest.py
index 06f80e82c..f8da7a871 100644
--- a/nova/tests/service_unittest.py
+++ b/nova/tests/service_unittest.py
@@ -22,6 +22,7 @@ Unit Tests for remote procedure calls using queue
import mox
+from nova import context
from nova import exception
from nova import flags
from nova import rpc
@@ -45,6 +46,7 @@ class ServiceTestCase(test.BaseTestCase):
def setUp(self): # pylint: disable=C0103
super(ServiceTestCase, self).setUp()
self.mox.StubOutWithMock(service, 'db')
+ self.context = context.get_admin_context()
def test_create(self):
host = 'foo'
@@ -88,10 +90,10 @@ class ServiceTestCase(test.BaseTestCase):
'report_count': 0,
'id': 1}
- service.db.service_get_by_args(None,
+ service.db.service_get_by_args(mox.IgnoreArg(),
host,
binary).AndRaise(exception.NotFound())
- service.db.service_create(None,
+ service.db.service_create(mox.IgnoreArg(),
service_create).AndReturn(service_ref)
self.mox.ReplayAll()
@@ -110,10 +112,10 @@ class ServiceTestCase(test.BaseTestCase):
'report_count': 0,
'id': 1}
service.db.__getattr__('report_state')
- service.db.service_get_by_args(None,
+ service.db.service_get_by_args(self.context,
host,
binary).AndReturn(service_ref)
- service.db.service_update(None, service_ref['id'],
+ service.db.service_update(self.context, service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
@@ -132,13 +134,13 @@ class ServiceTestCase(test.BaseTestCase):
'id': 1}
service.db.__getattr__('report_state')
- service.db.service_get_by_args(None,
+ service.db.service_get_by_args(self.context,
host,
binary).AndRaise(exception.NotFound())
- service.db.service_create(None,
+ service.db.service_create(self.context,
service_create).AndReturn(service_ref)
- service.db.service_get(None, service_ref['id']).AndReturn(service_ref)
- service.db.service_update(None, service_ref['id'],
+        service.db.service_get(self.context,
+                               service_ref['id']).AndReturn(service_ref)
+ service.db.service_update(self.context, service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
@@ -154,7 +156,7 @@ class ServiceTestCase(test.BaseTestCase):
'id': 1}
service.db.__getattr__('report_state')
- service.db.service_get_by_args(None,
+ service.db.service_get_by_args(self.context,
host,
binary).AndRaise(Exception())
@@ -173,10 +175,10 @@ class ServiceTestCase(test.BaseTestCase):
'id': 1}
service.db.__getattr__('report_state')
- service.db.service_get_by_args(None,
+ service.db.service_get_by_args(self.context,
host,
binary).AndReturn(service_ref)
- service.db.service_update(None, service_ref['id'],
+ service.db.service_update(self.context, service_ref['id'],
mox.ContainsKeyValue('report_count', 1))
self.mox.ReplayAll()
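
Note the split in these expectations: test_create matches the context loosely with mox.IgnoreArg(), presumably because it cannot predict the exact context instance the service builds, while the report_state tests expect self.context directly. A stand-in showing what the loose match does:

    class IgnoreArg(object):
        # Mimics mox.IgnoreArg(): equal to every value, so an
        # expectation can skip over an argument it cannot predict.
        def __eq__(self, other):
            return True

        def __ne__(self, other):
            return False

    assert IgnoreArg() == object()
    assert not (IgnoreArg() != {'any': 'context'})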
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index 1d665b502..8e2fa11c1 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -22,6 +22,7 @@ import logging
from twisted.internet import defer
+from nova import context
from nova import exception
from nova import db
from nova import flags
@@ -39,7 +40,7 @@ class VolumeTestCase(test.TrialTestCase):
self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake')
self.volume = utils.import_object(FLAGS.volume_manager)
- self.context = None
+ self.context = context.get_admin_context()
@staticmethod
def _create_volume(size='0'):
@@ -51,19 +52,19 @@ class VolumeTestCase(test.TrialTestCase):
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
- return db.volume_create(None, vol)['id']
+ return db.volume_create(context.get_admin_context(), vol)['id']
@defer.inlineCallbacks
def test_create_delete_volume(self):
"""Test volume can be created and deleted"""
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)
- self.assertEqual(volume_id, db.volume_get(None, volume_id).id)
+        self.assertEqual(volume_id,
+                         db.volume_get(context.get_admin_context(),
+                                       volume_id).id)
yield self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.NotFound,
db.volume_get,
- None,
+ self.context,
volume_id)
@defer.inlineCallbacks
@@ -92,7 +93,7 @@ class VolumeTestCase(test.TrialTestCase):
self.assertFailure(self.volume.create_volume(self.context,
volume_id),
db.NoMoreBlades)
- db.volume_destroy(None, volume_id)
+ db.volume_destroy(context.get_admin_context(), volume_id)
for volume_id in vols:
yield self.volume.delete_volume(self.context, volume_id)
@@ -113,12 +114,13 @@ class VolumeTestCase(test.TrialTestCase):
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
- db.volume_attached(None, volume_id, instance_id, mountpoint)
+ db.volume_attached(self.context, volume_id, instance_id, mountpoint)
else:
- yield self.compute.attach_volume(instance_id,
+ yield self.compute.attach_volume(self.context,
+ instance_id,
volume_id,
mountpoint)
- vol = db.volume_get(None, volume_id)
+ vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
@@ -128,17 +130,18 @@ class VolumeTestCase(test.TrialTestCase):
self.assertFailure(self.volume.delete_volume(self.context, volume_id),
exception.Error)
if FLAGS.fake_tests:
- db.volume_detached(None, volume_id)
+ db.volume_detached(self.context, volume_id)
else:
- yield self.compute.detach_volume(instance_id,
+ yield self.compute.detach_volume(self.context,
+ instance_id,
volume_id)
- vol = db.volume_get(None, volume_id)
+ vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
yield self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.Error,
db.volume_get,
- None,
+ self.context,
volume_id)
db.instance_destroy(self.context, instance_id)
@@ -151,7 +154,7 @@ class VolumeTestCase(test.TrialTestCase):
def _check(volume_id):
"""Make sure blades aren't duplicated"""
volume_ids.append(volume_id)
- (shelf_id, blade_id) = db.volume_get_shelf_and_blade(None,
+            ctxt = context.get_admin_context()
+            (shelf_id, blade_id) = db.volume_get_shelf_and_blade(ctxt,
volume_id)
shelf_blade = '%s.%s' % (shelf_id, blade_id)
self.assert_(shelf_blade not in shelf_blades)
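
The assertions in this file fetch state with get_admin_context() rather than the test's own context wherever cross-project visibility matters. A dict-based stand-in for the assumed rule, not nova's actual db API:

    def volume_get(context, volume_id, volumes):
        vol = volumes[volume_id]
        # Assumed semantics: non-admin callers only see volumes in
        # their own project.
        if not context['is_admin'] and vol['project'] != context['project']:
            raise LookupError('volume %s not found' % volume_id)
        return vol

    volumes = {1: {'project': 'fake', 'status': 'in-use'}}
    admin = {'is_admin': True, 'project': None}
    assert volume_get(admin, 1, volumes)['status'] == 'in-use'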
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index d868e083c..4976b4a3b 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -28,6 +28,7 @@ import shutil
from twisted.internet import defer
from twisted.internet import task
+from nova import context
from nova import db
from nova import exception
from nova import flags
@@ -140,12 +141,13 @@ class LibvirtConnection(object):
def _wait_for_shutdown():
try:
state = self.get_info(instance['name'])['state']
- db.instance_set_state(None, instance['id'], state)
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'], state)
if state == power_state.SHUTDOWN:
timer.stop()
d.callback(None)
except Exception:
- db.instance_set_state(None,
+ db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
timer.stop()
@@ -190,14 +192,15 @@ class LibvirtConnection(object):
def _wait_for_reboot():
try:
state = self.get_info(instance['name'])['state']
- db.instance_set_state(None, instance['id'], state)
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'], state)
if state == power_state.RUNNING:
logging.debug('instance %s: rebooted', instance['name'])
timer.stop()
d.callback(None)
except Exception, exn:
logging.error('_wait_for_reboot failed: %s', exn)
- db.instance_set_state(None,
+ db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
timer.stop()
@@ -210,7 +213,7 @@ class LibvirtConnection(object):
@exception.wrap_exception
def spawn(self, instance):
xml = self.to_xml(instance)
- db.instance_set_state(None,
+ db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.NOSTATE,
'launching')
@@ -225,7 +228,8 @@ class LibvirtConnection(object):
def _wait_for_boot():
try:
state = self.get_info(instance['name'])['state']
- db.instance_set_state(None, instance['id'], state)
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'], state)
if state == power_state.RUNNING:
logging.debug('instance %s: booted', instance['name'])
timer.stop()
@@ -233,7 +237,7 @@ class LibvirtConnection(object):
except:
logging.exception('instance %s: failed to boot',
instance['name'])
- db.instance_set_state(None,
+ db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
timer.stop()
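
_wait_for_shutdown, _wait_for_reboot, and _wait_for_boot all share one shape: poll the domain state on a timer, mirror it into the db with a fresh admin context each tick, and stop on the target state. A simplified synchronous sketch, with the twisted timer replaced by a bounded loop and the db call by a stand-in:

    def wait_for_state(get_state, set_state, target, max_ticks=10):
        for _ in range(max_ticks):
            state = get_state()
            # In the patch: db.instance_set_state(get_admin_context(), ...)
            set_state(state)
            if state == target:
                return state
        raise RuntimeError('instance never reached %r' % (target,))

    states = iter(['launching', 'launching', 'running'])
    assert wait_for_state(lambda: next(states), lambda s: None,
                          'running') == 'running'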
@@ -280,9 +284,11 @@ class LibvirtConnection(object):
key = str(inst['key_data'])
net = None
- network_ref = db.project_get_network(None, project.id)
+ network_ref = db.project_get_network(context.get_admin_context(),
+ project.id)
if network_ref['injected']:
- address = db.instance_get_fixed_address(None, inst['id'])
+ address = db.instance_get_fixed_address(context.get_admin_context(),
+ inst['id'])
with open(FLAGS.injected_network_template) as f:
net = f.read() % {'address': address,
'network': network_ref['network'],
@@ -314,7 +320,8 @@ class LibvirtConnection(object):
def to_xml(self, instance):
# TODO(termie): cache?
logging.debug('instance %s: starting toXML method', instance['name'])
- network = db.project_get_network(None, instance['project_id'])
+ network = db.project_get_network(context.get_admin_context(),
+ instance['project_id'])
# FIXME(vish): stick this in db
instance_type = instance_types.INSTANCE_TYPES[instance['instance_type']]
xml_info = {'type': FLAGS.libvirt_type,