summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric Day <eday@oddments.org>2010-12-08 17:24:40 +0000
committerTarmac <>2010-12-08 17:24:40 +0000
commit67cbebdfcb57d4b81be65e54e4ed4a2201d4efa4 (patch)
treec84a7aeeead2ecf5d65b5a7feac159010f1810a2
parent3bb38e64ed5144db00d77488243b6b70023a3c44 (diff)
parent90ddeb5a13fe8d1bea3d8a568bed011de72bfbf2 (diff)
downloadnova-67cbebdfcb57d4b81be65e54e4ed4a2201d4efa4.tar.gz
nova-67cbebdfcb57d4b81be65e54e4ed4a2201d4efa4.tar.xz
nova-67cbebdfcb57d4b81be65e54e4ed4a2201d4efa4.zip
Pushed terminate instance and network manager/topic methods into nova.compute.api.
-rw-r--r--nova/api/ec2/cloud.py65
-rw-r--r--nova/api/openstack/servers.py29
-rw-r--r--nova/compute/api.py82
-rw-r--r--nova/tests/api/openstack/test_servers.py13
4 files changed, 100 insertions, 89 deletions
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 161d2d038..7978e08a0 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -94,7 +94,7 @@ class CloudController(object):
"""
def __init__(self):
self.network_manager = utils.import_object(FLAGS.network_manager)
- self.compute_api = compute_api.ComputeAPI()
+ self.compute_api = compute_api.ComputeAPI(self.network_manager)
self.image_service = S3ImageService()
self.setup()
@@ -752,7 +752,6 @@ class CloudController(object):
instance_types.get_by_type(kwargs.get('instance_type', None)),
self.image_service,
kwargs['image_id'],
- self._get_network_topic(context),
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
kernel_id=kwargs.get('kernel_id'),
@@ -768,65 +767,11 @@ class CloudController(object):
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
-
- instance_id is a kwarg so its name cannot be modified.
- """
- ec2_id_list = instance_id
+ instance_id is a kwarg so its name cannot be modified."""
logging.debug("Going to start terminating instances")
- for id_str in ec2_id_list:
- internal_id = ec2_id_to_internal_id(id_str)
- logging.debug("Going to try and terminate %s" % id_str)
- try:
- instance_ref = db.instance_get_by_internal_id(context,
- internal_id)
- except exception.NotFound:
- logging.warning("Instance %s was not found during terminate",
- id_str)
- continue
-
- if (instance_ref['state_description'] == 'terminating'):
- logging.warning("Instance %s is already being terminated",
- id_str)
- continue
- now = datetime.datetime.utcnow()
- self.compute_api.update_instance(context,
- instance_ref['id'],
- state_description='terminating',
- state=0,
- terminated_at=now)
-
- # FIXME(ja): where should network deallocate occur?
- address = db.instance_get_floating_address(context,
- instance_ref['id'])
- if address:
- logging.debug("Disassociating address %s" % address)
- # NOTE(vish): Right now we don't really care if the ip is
- # disassociated. We may need to worry about
- # checking this later. Perhaps in the scheduler?
- network_topic = self._get_network_topic(context)
- rpc.cast(context,
- network_topic,
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": address}})
-
- address = db.instance_get_fixed_address(context,
- instance_ref['id'])
- if address:
- logging.debug("Deallocating address %s" % address)
- # NOTE(vish): Currently, nothing needs to be done on the
- # network node until release. If this changes,
- # we will need to cast here.
- self.network_manager.deallocate_fixed_ip(context.elevated(),
- address)
-
- host = instance_ref['host']
- if host:
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "terminate_instance",
- "args": {"instance_id": instance_ref['id']}})
- else:
- db.instance_destroy(context, instance_ref['id'])
+ for ec2_id in instance_id:
+ internal_id = ec2_id_to_internal_id(ec2_id)
+ self.compute_api.delete_instance(context, internal_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 519250e60..e7ab17d03 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -18,11 +18,12 @@
import webob
from webob import exc
+from nova import context
+from nova import exception
from nova import flags
from nova import rpc
from nova import utils
from nova import wsgi
-from nova import context
from nova.api.openstack import faults
from nova.compute import api as compute_api
from nova.compute import instance_types
@@ -82,7 +83,6 @@ class Controller(wsgi.Controller):
if not db_driver:
db_driver = FLAGS.db_driver
self.db_driver = utils.import_object(db_driver)
- self.network_manager = utils.import_object(FLAGS.network_manager)
self.compute_api = compute_api.ComputeAPI()
super(Controller, self).__init__()
@@ -120,11 +120,11 @@ class Controller(wsgi.Controller):
""" Destroys a server """
user_id = req.environ['nova.context']['user']['id']
ctxt = context.RequestContext(user_id, user_id)
- instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id))
- if instance and instance['user_id'] == user_id:
- self.db_driver.instance_destroy(ctxt, id)
- return faults.Fault(exc.HTTPAccepted())
- return faults.Fault(exc.HTTPNotFound())
+ try:
+ self.compute_api.delete_instance(ctxt, int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
def create(self, req):
""" Creates a new server for a given user """
@@ -139,7 +139,6 @@ class Controller(wsgi.Controller):
instance_types.get_by_flavor_id(env['server']['flavorId']),
utils.import_object(FLAGS.image_service),
env['server']['imageId'],
- self._get_network_topic(ctxt),
name=env['server']['name'],
description=env['server']['name'],
key_name=key_pair['name'],
@@ -166,7 +165,7 @@ class Controller(wsgi.Controller):
if 'name' in inst_dict['server']:
update_dict['display_name'] = inst_dict['server']['name']
- self.compute_api.update_instance(ctxt, instance['id'], update_dict)
+ self.compute_api.update_instance(ctxt, instance['id'], **update_dict)
return exc.HTTPNoContent()
def action(self, req, id):
@@ -185,14 +184,4 @@ class Controller(wsgi.Controller):
# TODO(gundlach): pass reboot_type, support soft reboot in
# virt driver
self.compute_api.reboot(ctxt, id)
-
- def _get_network_topic(self, context):
- """Retrieves the network host for a project"""
- network_ref = self.network_manager.get_network(context)
- host = network_ref['host']
- if not host:
- host = rpc.call(context,
- FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
- return self.db_driver.queue_get_for(context, FLAGS.network_topic, host)
+ return exc.HTTPAccepted()
diff --git a/nova/compute/api.py b/nova/compute/api.py
index da01ca61a..457d6e27f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -20,6 +20,7 @@
Handles all API requests relating to instances (guest vms).
"""
+import datetime
import logging
import time
@@ -43,17 +44,17 @@ def generate_default_hostname(internal_id):
class ComputeAPI(base.Base):
"""API for interacting with the compute manager."""
- def __init__(self, **kwargs):
- self.network_manager = utils.import_object(FLAGS.network_manager)
+ def __init__(self, network_manager=None, **kwargs):
+ if not network_manager:
+ network_manager = utils.import_object(FLAGS.network_manager)
+ self.network_manager = network_manager
super(ComputeAPI, self).__init__(**kwargs)
- # TODO(eday): network_topic arg should go away once we push network
- # allocation into the scheduler or compute worker.
def create_instances(self, context, instance_type, image_service, image_id,
- network_topic, min_count=1, max_count=1,
- kernel_id=None, ramdisk_id=None, name='',
- description='', user_data='', key_name=None,
- key_data=None, security_group='default',
+ min_count=1, max_count=1, kernel_id=None,
+ ramdisk_id=None, name='', description='',
+ user_data='', key_name=None, key_data=None,
+ security_group='default',
generate_hostname=generate_default_hostname):
"""Create the number of instances requested if quote and
other arguments check out ok."""
@@ -139,7 +140,7 @@ class ComputeAPI(base.Base):
instance_id,
is_vpn)
rpc.cast(elevated,
- network_topic,
+ self._get_network_topic(context),
{"method": "setup_fixed_ip",
"args": {"address": address}})
@@ -211,6 +212,58 @@ class ComputeAPI(base.Base):
"""
self.db.instance_update(context, instance_id, kwargs)
+ def delete_instance(self, context, instance_id):
+ logging.debug("Going to try and terminate %d" % instance_id)
+ try:
+ instance = self.db.instance_get_by_internal_id(context,
+ instance_id)
+ except exception.NotFound as e:
+ logging.warning("Instance %d was not found during terminate",
+ instance_id)
+ raise e
+
+ if (instance['state_description'] == 'terminating'):
+ logging.warning("Instance %d is already being terminated",
+ instance_id)
+ return
+
+ self.update_instance(context,
+ instance['id'],
+ state_description='terminating',
+ state=0,
+ terminated_at=datetime.datetime.utcnow())
+
+ # FIXME(ja): where should network deallocate occur?
+ address = self.db.instance_get_floating_address(context,
+ instance['id'])
+ if address:
+ logging.debug("Disassociating address %s" % address)
+ # NOTE(vish): Right now we don't really care if the ip is
+ # disassociated. We may need to worry about
+ # checking this later. Perhaps in the scheduler?
+ rpc.cast(context,
+ self._get_network_topic(context),
+ {"method": "disassociate_floating_ip",
+ "args": {"floating_address": address}})
+
+ address = self.db.instance_get_fixed_address(context, instance['id'])
+ if address:
+ logging.debug("Deallocating address %s" % address)
+ # NOTE(vish): Currently, nothing needs to be done on the
+ # network node until release. If this changes,
+ # we will need to cast here.
+ self.network_manager.deallocate_fixed_ip(context.elevated(),
+ address)
+
+ host = instance['host']
+ if host:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "terminate_instance",
+ "args": {"instance_id": instance['id']}})
+ else:
+ self.db.instance_destroy(context, instance['id'])
+
def reboot(self, context, instance_id):
"""Reboot the given instance."""
instance = self.db.instance_get_by_internal_id(context, instance_id)
@@ -237,3 +290,14 @@ class ComputeAPI(base.Base):
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "unrescue_instance",
"args": {"instance_id": instance['id']}})
+
+ def _get_network_topic(self, context):
+ """Retrieves the network host for a project"""
+ network_ref = self.network_manager.get_network(context)
+ host = network_ref['host']
+ if not host:
+ host = rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"network_id": network_ref['id']}})
+ return self.db.queue_get_for(context, FLAGS.network_topic, host)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 44ac8f342..46b9c5348 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -47,6 +47,14 @@ def return_security_group(context, instance_id, security_group_id):
pass
+def instance_update(context, instance_id, kwargs):
+ pass
+
+
+def instance_address(context, instance_id):
+ return None
+
+
def stub_instance(id, user_id=1):
return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id,
display_name='server%s' % id, internal_id=id)
@@ -69,6 +77,11 @@ class ServersTest(unittest.TestCase):
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
return_security_group)
+ self.stubs.Set(nova.db.api, 'instance_update', instance_update)
+ self.stubs.Set(nova.db.api, 'instance_get_fixed_address',
+ instance_address)
+ self.stubs.Set(nova.db.api, 'instance_get_floating_address',
+ instance_address)
def tearDown(self):
self.stubs.UnsetAll()