author    Vishvananda Ishaya <vishvananda@gmail.com>    2010-12-20 21:04:54 +0000
committer Vishvananda Ishaya <vishvananda@gmail.com>    2010-12-20 21:04:54 +0000
commit    de383023e4d5c30d3ad4474af104f6b659e1bd32 (patch)
tree      db7861a11e84dc00cc9245a8b291ad40d7d2e2a7
parent    800ecbd713c55d7410d6eb860a439cb87468e7ad (diff)
directly copy ip allocation into compute
-rw-r--r--    nova/compute/api.py        44
-rw-r--r--    nova/compute/manager.py    55
2 files changed, 53 insertions(+), 46 deletions(-)
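This change moves the fixed-IP allocation and the setup_fixed_ip cast out of ComputeAPI.create(), and the floating/fixed address teardown out of ComputeAPI's terminate path, into ComputeManager.run_instance() and terminate_instance(), taking the _get_network_topic() helper along with it. The practical effect is that addresses are handled on the compute host after scheduling, as the removed TODO suggested. Below is a condensed, runnable sketch of the run_instance() side of the flow; the stub class, the printed output, and the "network.netnode1" topic string are illustrative stand-ins for nova's real components, not code from this commit:

# Condensed, self-contained sketch of the networking sequence this commit
# moves into ComputeManager.run_instance(). The stubs below are assumptions
# for illustration; the call order and message shapes come from the diff.

class StubNetworkManager(object):
    def allocate_fixed_ip(self, context, instance_id, is_vpn):
        # The real manager reserves a free fixed IP in the project network.
        return "10.0.0.5"

    def setup_compute_network(self, context, instance_id):
        # The real manager plumbs the bridge/VLAN on this compute host.
        print("compute network ready for instance %s" % instance_id)


def cast(context, topic, msg):
    # Stand-in for nova.rpc.cast: fire-and-forget message onto a topic queue.
    print("cast to %s: %s" % (topic, msg))


def run_instance_networking(context, instance_id, is_vpn=False):
    """Post-commit ordering: allocate the address on the compute side,
    tell the network host to set it up, then configure the local bridge."""
    network_manager = StubNetworkManager()
    address = network_manager.allocate_fixed_ip(context, instance_id, is_vpn)
    cast(context, "network.netnode1",  # topic from _get_network_topic()
         {"method": "setup_fixed_ip", "args": {"address": address}})
    network_manager.setup_compute_network(context, instance_id)


run_instance_networking(context=None, instance_id=42)

Note the ordering: the address must exist before the cast to the network host, and setup_compute_network() now runs after the instance row has a host assigned, rather than before as in the old ComputeAPI code.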
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 8e0efa4cc..2dae9cdbc 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -145,18 +145,6 @@ class ComputeAPI(base.Base):
instance = self.update_instance(context, instance_id, **updates)
instances.append(instance)
- # TODO(vish): This probably should be done in the scheduler
- # or in compute as a call. The network should be
- # allocated after the host is assigned and setup
- # can happen at the same time.
- address = self.network_manager.allocate_fixed_ip(context,
- instance_id,
- is_vpn)
- rpc.cast(elevated,
- self._get_network_topic(context),
- {"method": "setup_fixed_ip",
- "args": {"address": address}})
-
logging.debug("Casting to scheduler for %s/%s's instance %s",
context.project_id, context.user_id, instance_id)
rpc.cast(context,
@@ -219,28 +207,6 @@ class ComputeAPI(base.Base):
state=0,
terminated_at=datetime.datetime.utcnow())
- # FIXME(ja): where should network deallocate occur?
- address = self.db.instance_get_floating_address(context,
- instance['id'])
- if address:
- logging.debug("Disassociating address %s" % address)
- # NOTE(vish): Right now we don't really care if the ip is
- # disassociated. We may need to worry about
- # checking this later. Perhaps in the scheduler?
- rpc.cast(context,
- self._get_network_topic(context),
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": address}})
-
- address = self.db.instance_get_fixed_address(context, instance['id'])
- if address:
- logging.debug("Deallocating address %s" % address)
- # NOTE(vish): Currently, nothing needs to be done on the
- # network node until release. If this changes,
- # we will need to cast here.
- self.network_manager.deallocate_fixed_ip(context.elevated(),
- address)
-
host = instance['host']
if host:
rpc.cast(context,
@@ -293,13 +259,3 @@ class ComputeAPI(base.Base):
{"method": "unrescue_instance",
"args": {"instance_id": instance['id']}})
- def _get_network_topic(self, context):
- """Retrieves the network host for a project"""
- network_ref = self.network_manager.get_network(context)
- host = network_ref['host']
- if not host:
- host = rpc.call(context,
- FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
- return self.db.queue_get_for(context, FLAGS.network_topic, host)
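_get_network_topic() moves verbatim from ComputeAPI into ComputeManager in the next file. A minimal sketch of the lazy host-assignment pattern it implements, assuming rpc.call() returns the host chosen by the network service and that db.queue_get_for() yields a per-host queue name of the form "topic.host" (that format is an assumption for illustration):

# Minimal sketch of the lazy host-assignment pattern in _get_network_topic().
# call() and the "topic.host" queue format are illustrative assumptions.

def call(context, topic, msg):
    # Stand-in for nova.rpc.call: synchronous request on a topic queue.
    return "netnode1"  # pretend the network service claimed this network


def get_network_topic(context, network_ref, network_topic="network"):
    host = network_ref.get("host")
    if not host:
        # No network host yet: ask the network service to claim the network.
        host = call(context, network_topic,
                    {"method": "set_network_host",
                     "args": {"network_id": network_ref["id"]}})
    # db.queue_get_for() resolves the per-host queue, e.g. "network.netnode1".
    return "%s.%s" % (network_topic, host)


print(get_network_topic(None, {"id": 1, "host": None}))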
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 7eb60e262..6d705f983 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -40,6 +40,7 @@ import logging
from nova import exception
from nova import flags
from nova import manager
+from nova import rpc
from nova import utils
from nova.compute import power_state
@@ -76,6 +77,17 @@ class ComputeManager(manager.Manager):
state = power_state.NOSTATE
self.db.instance_set_state(context, instance_id, state)
+ def _get_network_topic(self, context):
+ """Retrieves the network host for a project"""
+ network_ref = self.network_manager.get_network(context)
+ host = network_ref['host']
+ if not host:
+ host = rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"network_id": network_ref['id']}})
+ return self.db.queue_get_for(context, FLAGS.network_topic, host)
+
@exception.wrap_exception
def refresh_security_group(self, context, security_group_id, **_kwargs):
"""This call passes stright through to the virtualization driver."""
@@ -89,11 +101,26 @@ class ComputeManager(manager.Manager):
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id)
- self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context,
instance_id,
{'host': self.host})
+ self.db.instance_set_state(context,
+ instance_id,
+ power_state.NOSTATE,
+ 'networking')
+
+ is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id
+ address = self.network_manager.allocate_fixed_ip(context,
+ instance_id,
+ is_vpn)
+ rpc.cast(context,
+ self._get_network_topic(context),
+ {"method": "setup_fixed_ip",
+ "args": {"address": address}})
+
+ self.network_manager.setup_compute_network(context, instance_id)
+
# TODO(vish) check to make sure the availability zone matches
self.db.instance_set_state(context,
instance_id,
@@ -119,9 +146,33 @@ class ComputeManager(manager.Manager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
- logging.debug("instance %s: terminating", instance_id)
instance_ref = self.db.instance_get(context, instance_id)
+
+ address = self.db.instance_get_floating_address(context,
+ instance_ref['id'])
+ if address:
+ logging.debug("Disassociating address %s" % address)
+ # NOTE(vish): Right now we don't really care if the ip is
+ # disassociated. We may need to worry about
+ # checking this later.
+ rpc.cast(context,
+ self._get_network_topic(context),
+ {"method": "disassociate_floating_ip",
+ "args": {"floating_address": address}})
+
+ address = self.db.instance_get_fixed_address(context,
+ instance_ref['id'])
+ if address:
+ logging.debug("Deallocating address %s" % address)
+ # NOTE(vish): Currently, nothing needs to be done on the
+ # network node until release. If this changes,
+ # we will need to cast here.
+ self.network_manager.deallocate_fixed_ip(context.elevated(),
+ address)
+
+ logging.debug("instance %s: terminating", instance_id)
+
volumes = instance_ref.get('volumes', []) or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
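For symmetry with run_instance(), here is a condensed, runnable sketch of the teardown ordering that terminate_instance() now performs before destroying the instance. The stubs and the topic string are illustrative assumptions; the ordering follows the diff: the floating IP is disassociated with a fire-and-forget cast to the network host, while the fixed IP is deallocated locally because nothing needs to happen on the network node until lease release.

# Condensed sketch of the teardown order added to terminate_instance().
# The stub manager and cast() are assumptions for illustration only.

class StubNetworkManager(object):
    def deallocate_fixed_ip(self, context, address):
        # The real manager returns the address to the project's pool.
        print("deallocated fixed ip %s" % address)


def cast(context, topic, msg):
    # Stand-in for nova.rpc.cast: fire-and-forget, result is not checked.
    print("cast to %s: %s" % (topic, msg))


def terminate_networking(context, floating, fixed, network_manager):
    """Post-commit teardown in terminate_instance(), run before the
    instance itself is destroyed."""
    if floating:
        # Best-effort: we don't wait to confirm the disassociation.
        cast(context, "network.netnode1",  # topic from _get_network_topic()
             {"method": "disassociate_floating_ip",
              "args": {"floating_address": floating}})
    if fixed:
        # Nothing happens on the network node until lease release, so the
        # deallocation is done locally rather than cast over rpc.
        network_manager.deallocate_fixed_ip(context, fixed)


terminate_networking(None, "192.168.0.9", "10.0.0.5", StubNetworkManager())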