-rw-r--r--  nova/compute/manager.py            |  46
-rw-r--r--  nova/db/sqlalchemy/models.py       |   2
-rw-r--r--  nova/network/api.py                |  20
-rw-r--r--  nova/network/manager.py            | 154
-rw-r--r--  nova/network/quantum/manager.py    |   4
-rw-r--r--  nova/tests/api/ec2/test_cloud.py   |   3
-rw-r--r--  nova/tests/test_compute.py         |   8
-rw-r--r--  nova/tests/test_network.py         |   2
8 files changed, 204 insertions, 35 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 73fceb2f4..3d27d94cc 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1162,6 +1162,10 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(instance_ref,
"resize.confirm.start")
+ # NOTE(tr3buchet): tear down networks on source host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ migration_ref['source_compute'], teardown=True)
+
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.confirm_migration(migration_ref, instance_ref,
self._legacy_nw_info(network_info))
@@ -1183,6 +1187,10 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
+ # NOTE(tr3buchet): tear down networks on destination host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ teardown=True)
+
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.destroy(instance_ref, self._legacy_nw_info(network_info))
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
@@ -1216,7 +1224,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# Just roll back the record. There's no need to resize down since
# the 'old' VM already has the preferred attributes
self._instance_update(context,
- instance_ref["uuid"],
+ instance_ref['uuid'],
memory_mb=instance_type['memory_mb'],
host=migration_ref['source_compute'],
vcpus=instance_type['vcpus'],
@@ -1352,6 +1360,10 @@ class ComputeManager(manager.SchedulerDependentManager):
ephemeral_gb=instance_type['ephemeral_gb'])
resize_instance = True
+ # NOTE(tr3buchet): setup networks on destination host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ migration_ref['dest_compute'])
+
network_info = self._get_instance_nw_info(context, instance_ref)
self._notify_about_instance_usage(instance_ref, "finish_resize.start",
@@ -1865,6 +1877,10 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.pre_live_migration(block_device_info)
+ # NOTE(tr3buchet): setup networks on destination host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ self.host)
+
# Bridge settings.
# Call this method prior to ensure_filtering_rules_for_instance,
# since bridge is not set up, ensure_filtering_rules_for instance
@@ -1928,8 +1944,8 @@ class ComputeManager(manager.SchedulerDependentManager):
if self._get_instance_volume_bdms(context, instance_id):
rpc.call(context,
FLAGS.volume_topic,
- {"method": "check_for_export",
- "args": {'instance_id': instance_id}})
+ {'method': 'check_for_export',
+ 'args': {'instance_id': instance_id}})
if block_migration:
disk = self.driver.get_instance_disk_info(instance_ref.name)
@@ -1938,8 +1954,8 @@ class ComputeManager(manager.SchedulerDependentManager):
rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, dest),
- {"method": "pre_live_migration",
- "args": {'instance_id': instance_id,
+ {'method': 'pre_live_migration',
+ 'args': {'instance_id': instance_id,
'block_migration': block_migration,
'disk': disk}})
@@ -1988,6 +2004,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# Releasing vlan.
# (not necessary in current implementation?)
+ # NOTE(tr3buchet): tear down networks on source host
+ self.network_api.setup_networks_on_host(ctxt, instance_ref,
+ self.host, teardown=True)
+
network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
self.driver.unfilter_instance(instance_ref,
@@ -2070,6 +2090,14 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.info(_('Post operation of migration started'),
instance=instance_ref)
+
+ # NOTE(tr3buchet): setup networks on destination host
+ # this is called a second time because
+ # multi_host does not create the bridge in
+ # plug_vifs
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ self.host)
+
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.post_live_migration_at_destination(context, instance_ref,
self._legacy_nw_info(network_info),
@@ -2094,6 +2122,10 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.ACTIVE,
task_state=None)
+ # NOTE(tr3buchet): setup networks on source host (really it's re-setup)
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ self.host)
+
for bdm in self._get_instance_volume_bdms(context, instance_ref['id']):
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
@@ -2121,6 +2153,10 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
network_info = self._get_instance_nw_info(context, instance_ref)
+ # NOTE(tr3buchet): tear down networks on destination host
+ self.network_api.setup_networks_on_host(context, instance_ref,
+ self.host, teardown=True)
+
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_volume_block_device_info(
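The compute-manager hunks above all follow one pattern: the host gaining the instance gets a plain setup_networks_on_host() call, and the host losing it gets the same call with teardown=True. A minimal sketch of that pattern, assuming a FakeNetworkAPI stand-in rather than the real nova.network.api.API:

class FakeNetworkAPI(object):
    """Stand-in for nova.network.api.API, used only for this sketch."""
    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        action = 'teardown' if teardown else 'setup'
        print('%s networks for %s on %s'
              % (action, instance['uuid'], host or instance['host']))

def finish_resize_networks(network_api, context, instance, migration):
    # the destination host is gaining the instance -> set networks up there
    network_api.setup_networks_on_host(context, instance,
                                       migration['dest_compute'])

def confirm_resize_networks(network_api, context, instance, migration):
    # the source host is losing the instance -> tear its plumbing down
    network_api.setup_networks_on_host(context, instance,
                                       migration['source_compute'],
                                       teardown=True)

api = FakeNetworkAPI()
instance = {'uuid': 'fake-uuid', 'host': 'compute-1'}
migration = {'source_compute': 'compute-1', 'dest_compute': 'compute-2'}
finish_resize_networks(api, None, instance, migration)   # setup on compute-2
confirm_resize_networks(api, None, instance, migration)  # teardown on compute-1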
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 18b40dfaa..48b59a680 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -638,8 +638,10 @@ class Migration(BASE, NovaBase):
"""Represents a running host-to-host migration."""
__tablename__ = 'migrations'
id = Column(Integer, primary_key=True, nullable=False)
+ # NOTE(tr3buchet): the ____compute variables are instance['host']
source_compute = Column(String(255))
dest_compute = Column(String(255))
+ # NOTE(tr3buchet): dest_host, btw, is an ip address
dest_host = Column(String(255))
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
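A purely illustrative row, showing the distinction the two NOTEs above draw (hostnames in the *_compute columns, an IP address in dest_host); the values are made up:

migration = {
    'source_compute': 'compute-1',   # instance['host'] before the migration
    'dest_compute': 'compute-2',     # instance['host'] after the migration
    'dest_host': '192.168.0.12',     # IP address of the destination host
}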
diff --git a/nova/network/api.py b/nova/network/api.py
index 10d87fe6a..a3489ae98 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -176,12 +176,14 @@ class API(base.Base):
args = kwargs
args['instance_id'] = instance['id']
args['project_id'] = instance['project_id']
+ args['host'] = instance['host']
rpc.cast(context, FLAGS.network_topic,
{'method': 'deallocate_for_instance',
'args': args})
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
"""Adds a fixed ip to instance from specified network."""
+ # NOTE(tr3buchet): poorly written, broken in all but flat manager
args = {'instance_id': instance_id,
'host': host,
'network_id': network_id}
@@ -191,6 +193,7 @@ class API(base.Base):
def remove_fixed_ip_from_instance(self, context, instance_id, address):
"""Removes a fixed ip from instance from specified network."""
+ # NOTE(tr3buchet): poorly written, broken in all but flat manager
args = {'instance_id': instance_id,
'address': address}
rpc.cast(context, FLAGS.network_topic,
@@ -317,3 +320,20 @@ class API(base.Base):
return rpc.call(context, FLAGS.network_topic,
{'method': 'create_public_dns_domain',
'args': args})
+
+ def setup_networks_on_host(self, context, instance, host=None,
+ teardown=False):
+ """Setup or teardown the network structures on hosts related to
+ instance"""
+ host = host or instance['host']
+ # NOTE(tr3buchet): host is passed in cases where we need to setup
+ # or teardown the networks on a host which has been migrated to/from
+ # and instance['host'] is not yet or is no longer equal to
+ args = {'instance_id': instance['id'],
+ 'host': host,
+ 'teardown': teardown}
+
+ # NOTE(tr3buchet): the call is just to wait for completion
+ rpc.call(context, FLAGS.network_topic,
+ {'method': 'setup_networks_on_host',
+ 'args': args})
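The new API method uses rpc.call rather than rpc.cast: a cast (as in deallocate_for_instance above) is fire-and-forget, while a call blocks until the remote network host has finished, which is the only reason it is used here. A rough sketch of the difference, with a FakeRPC stand-in for the real nova.rpc module that dispatches over the message queue:

class FakeRPC(object):
    def cast(self, context, topic, msg):
        # fire-and-forget: the caller does not wait for the remote method
        print('cast %s to %s' % (msg['method'], topic))

    def call(self, context, topic, msg):
        # blocking: returns only once the remote method has completed
        print('call %s on %s' % (msg['method'], topic))

rpc = FakeRPC()
rpc.call(None, 'network',
         {'method': 'setup_networks_on_host',
          'args': {'instance_id': 1, 'host': 'compute-2', 'teardown': False}})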
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 9dd75b03b..2fede9d81 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -190,6 +190,7 @@ class RPCAllocateFixedIP(object):
break
# NOTE(vish): if we are not multi_host pass to the network host
+ # NOTE(tr3buchet): but if we are, host came from instance['host']
if not network['multi_host']:
host = network['host']
# NOTE(vish): if there is no network host, set one
@@ -228,6 +229,29 @@ class RPCAllocateFixedIP(object):
network = self._get_network_by_id(context, network_id)
return self.allocate_fixed_ip(context, instance_id, network, **kwargs)
+ def deallocate_fixed_ip(self, context, address, host):
+ """Call the superclass deallocate_fixed_ip if i'm the correct host
+ otherwise cast to the correct host"""
+ fixed_ip = self.db.fixed_ip_get_by_address(context, address)
+ network = self._get_network_by_id(context, fixed_ip['network_id'])
+
+ # NOTE(vish): if we are not multi_host pass to the network host
+ # NOTE(tr3buchet): but if we are, host came from instance['host']
+ if not network['multi_host']:
+ host = network['host']
+ if host != self.host:
+ # need to call deallocate_fixed_ip on correct network host
+ topic = self.db.queue_get_for(context, FLAGS.network_topic, host)
+ args = {'address': address,
+ 'host': host}
+ rpc.cast(context, topic,
+ {'method': 'deallocate_fixed_ip',
+ 'args': args})
+ else:
+ # i am the correct host, run here
+ super(RPCAllocateFixedIP, self).deallocate_fixed_ip(context,
+ address)
+
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution"""
@@ -738,7 +762,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# an ip address.
ctxt = context.get_admin_context()
for network in self.db.network_get_all_by_host(ctxt, self.host):
- self._setup_network(ctxt, network)
+ self._setup_network_on_host(ctxt, network)
@manager.periodic_task
def _disassociate_stale_fixed_ips(self, context):
@@ -1169,7 +1193,7 @@ class NetworkManager(manager.SchedulerDependentManager):
self.instance_dns_manager.create_entry(uuid, address,
"A",
self.instance_dns_domain)
- self._setup_network(context, network)
+ self._setup_network_on_host(context, network)
return address
def deallocate_fixed_ip(self, context, address, **kwargs):
@@ -1191,10 +1215,9 @@ class NetworkManager(manager.SchedulerDependentManager):
if FLAGS.force_dhcp_release:
network = self._get_network_by_id(context,
fixed_ip_ref['network_id'])
- dev = self.driver.get_dev(network)
vif = self.db.virtual_interface_get_by_instance_and_network(
context, instance_id, network['id'])
- self.driver.release_dhcp(dev, address, vif['address'])
+ self._teardown_network_on_host(context, network, vif, address)
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
@@ -1234,7 +1257,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# the code below will update the file if necessary
if FLAGS.update_dhcp_on_disassociate:
network_ref = self.db.fixed_ip_get_network(context, address)
- self._setup_network(context, network_ref)
+ self._setup_network_on_host(context, network_ref)
def create_networks(self, context, label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway, gateway_v6, bridge,
@@ -1425,7 +1448,65 @@ class NetworkManager(manager.SchedulerDependentManager):
"""Calls allocate_fixed_ip once for each network."""
raise NotImplementedError()
- def _setup_network(self, context, network_ref):
+ def setup_networks_on_host(self, context, instance_id, host,
+ teardown=False):
+ """calls setup/teardown on network hosts associated with an instance"""
+ green_pool = greenpool.GreenPool()
+
+ if teardown:
+ call_func = self._teardown_network_on_host
+ else:
+ call_func = self._setup_network_on_host
+
+ vifs = self.db.virtual_interface_get_by_instance(context,
+ instance_id)
+ for vif in vifs:
+ network = self.db.network_get(context, vif['network_id'])
+ fixed_ips = self.db.fixed_ips_by_virtual_interface(context,
+ vif['id'])
+ addresses = [fixed_ip['address'] for fixed_ip in fixed_ips]
+ if not network['multi_host']:
+ # NOTE(tr3buchet): if using multi_host, host is instance['host']
+ host = network['host']
+ if self.host == host or host is None:
+ # at this point i am the correct host, or host doesn't
+ # matter -> FlatManager
+ for address in addresses:
+ call_func(context, network, vif, address)
+ else:
+ # i'm not the right host, run call on correct host
+ topic = self.db.queue_get_for(context, FLAGS.network_topic,
+ host)
+ args = {'network_id': network['id'],
+ 'vif_id': vif['id'],
+ 'teardown': teardown}
+ for address in addresses:
+ # NOTE(tr3buchet): the call is just to wait for completion
+ args['address'] = address
+ green_pool.spawn_n(rpc.call, context, topic,
+ {'method': 'rpc_setup_network_on_host',
+ 'args': args})
+
+ # wait for all of the setups (if any) to finish
+ green_pool.waitall()
+
+ def rpc_setup_network_on_host(self, context, network_id, vif_id, address,
+ teardown):
+ if teardown:
+ call_func = self._teardown_network_on_host
+ else:
+ call_func = self._setup_network_on_host
+
+ # subcall from original setup_networks_on_host
+ vif = self.db.virtual_interface_get(context, vif_id)
+ network = self.db.network_get(context, network_id)
+ call_func(context, network, vif, address)
+
+ def _setup_network_on_host(self, context, network, vif=None, address=None):
+ """Sets up network on this host."""
+ raise NotImplementedError()
+
+ def _teardown_network_on_host(self, context, network, vif, address):
"""Sets up network on this host."""
raise NotImplementedError()
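When the work has to happen on other hosts, setup_networks_on_host above fans the rpc.call invocations out over an eventlet GreenPool and then blocks on waitall() so the caller only returns once every remote setup/teardown has completed. A minimal sketch of that pattern, with fake_rpc_call standing in for nova.rpc.call:

from eventlet import greenpool

def fake_rpc_call(context, topic, msg):
    print('calling %s on %s for %s'
          % (msg['method'], topic, msg['args']['address']))

pool = greenpool.GreenPool()
for address in ['10.0.0.2', '10.0.0.3']:
    pool.spawn_n(fake_rpc_call, None, 'network.other-host',
                 {'method': 'rpc_setup_network_on_host',
                  'args': {'address': address, 'teardown': False}})
pool.waitall()  # nothing proceeds until every remote call has returned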
@@ -1557,11 +1638,18 @@ class FlatManager(NetworkManager):
**kwargs)
self.db.fixed_ip_disassociate(context, address)
- def _setup_network(self, context, network_ref):
+ def _setup_network_on_host(self, context, network, vif=None, address=None):
"""Setup Network on this host."""
+ # NOTE(tr3buchet): this does not need to happen on every ip
+ # allocation; this functionality makes more sense in create_network,
+ # but we'd have to move the flat_injected flag to compute
net = {}
net['injected'] = FLAGS.flat_injected
- self.db.network_update(context, network_ref['id'], net)
+ self.db.network_update(context, network['id'], net)
+
+ def _teardown_network_on_host(self, context, network, vif, address):
+ """Tear down netowrk on this host."""
+ pass
class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
@@ -1584,21 +1672,26 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
super(FlatDHCPManager, self).init_host()
self.init_host_floating_ips()
- def _setup_network(self, context, network_ref):
+ def _setup_network_on_host(self, context, network, vif=None, address=None):
"""Sets up network on this host."""
- network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
+ network['dhcp_server'] = self._get_dhcp_ip(context, network)
- self.l3driver.initialize_gateway(network_ref)
+ self.l3driver.initialize_gateway(network)
if not FLAGS.fake_network:
- dev = self.driver.get_dev(network_ref)
- self.driver.update_dhcp(context, dev, network_ref)
+ dev = self.driver.get_dev(network)
+ self.driver.update_dhcp(context, dev, network)
if(FLAGS.use_ipv6):
- self.driver.update_ra(context, dev, network_ref)
+ self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
- self.db.network_update(context, network_ref['id'],
+ self.db.network_update(context, network['id'],
{'gateway_v6': gateway})
+ def _teardown_network_on_host(self, context, network, vif, address):
+ if not FLAGS.fake_network:
+ dev = self.driver.get_dev(network)
+ self.driver.release_dhcp(dev, address, vif['address'])
+
def _get_network_by_id(self, context, network_id):
return NetworkManager._get_network_by_id(self, context.elevated(),
network_id)
@@ -1675,7 +1768,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
values = {'allocated': True,
'virtual_interface_id': vif['id']}
self.db.fixed_ip_update(context, address, values)
- self._setup_network(context, network)
+ self._setup_network_on_host(context, network)
return address
@wrap_check_policy
@@ -1713,35 +1806,40 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
NetworkManager.create_networks(self, context, vpn=True, **kwargs)
- def _setup_network(self, context, network_ref):
+ def _setup_network_on_host(self, context, network, vif=None, address=None):
"""Sets up network on this host."""
- if not network_ref['vpn_public_address']:
+ if not network['vpn_public_address']:
net = {}
address = FLAGS.vpn_ip
net['vpn_public_address'] = address
- network_ref = db.network_update(context, network_ref['id'], net)
+ network = db.network_update(context, network['id'], net)
else:
- address = network_ref['vpn_public_address']
- network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
+ address = network['vpn_public_address']
+ network['dhcp_server'] = self._get_dhcp_ip(context, network)
- self.l3driver.initialize_gateway(network_ref)
+ self.l3driver.initialize_gateway(network)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == FLAGS.vpn_ip and hasattr(self.driver,
"ensure_vpn_forward"):
self.l3driver.add_vpn(FLAGS.vpn_ip,
- network_ref['vpn_public_port'],
- network_ref['vpn_private_address'])
+ network['vpn_public_port'],
+ network['vpn_private_address'])
if not FLAGS.fake_network:
- dev = self.driver.get_dev(network_ref)
- self.driver.update_dhcp(context, dev, network_ref)
+ dev = self.driver.get_dev(network)
+ self.driver.update_dhcp(context, dev, network)
if(FLAGS.use_ipv6):
- self.driver.update_ra(context, dev, network_ref)
+ self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
- self.db.network_update(context, network_ref['id'],
+ self.db.network_update(context, network['id'],
{'gateway_v6': gateway})
+ def _teardown_network_on_host(self, context, network, vif, address):
+ if not FLAGS.fake_network:
+ dev = self.driver.get_dev(network)
+ self.driver.release_dhcp(dev, address, vif['address'])
+
def _get_networks_by_uuids(self, context, network_uuids):
return self.db.network_get_all_by_uuids(context, network_uuids,
context.project_id)
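Taken together, the manager changes give every manager the same shape: the base class decides where the work runs, and each subclass decides what "setup" and "teardown" mean on that host (FlatManager only flags injection, while the DHCP-based managers update or release DHCP for the network). A schematic sketch of that template-method split, with illustrative class names only:

class BaseManager(object):
    def setup_networks_on_host(self, network, teardown=False):
        func = (self._teardown_network_on_host if teardown
                else self._setup_network_on_host)
        func(network)

class FlatStyleManager(BaseManager):
    def _setup_network_on_host(self, network):
        print('mark %s as injected' % network['label'])

    def _teardown_network_on_host(self, network):
        pass  # nothing host-specific to undo

class DHCPStyleManager(BaseManager):
    def _setup_network_on_host(self, network):
        print('configure gateway and dhcp for %s' % network['label'])

    def _teardown_network_on_host(self, network):
        print('release dhcp lease on %s' % network['label'])

DHCPStyleManager().setup_networks_on_host({'label': 'net0'}, teardown=True)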
diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py
index fad8ecc33..16e782fa8 100644
--- a/nova/network/quantum/manager.py
+++ b/nova/network/quantum/manager.py
@@ -689,3 +689,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
leases_text += text
LOG.debug("DHCP leases: %s" % leases_text)
return leases_text
+
+ def setup_networks_on_host(self, *args, **kwargs):
+ # no host specific setup is needed in quantum manager
+ pass
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 52cb670ca..5a64f237e 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -218,7 +218,8 @@ class CloudTestCase(test.TestCase):
public_ip=address)
self.cloud.release_address(self.context,
public_ip=address)
- self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'])
+ self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
+ inst['host'])
db.instance_destroy(self.context, inst['id'])
db.floating_ip_destroy(self.context, address)
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 24a383f80..3b784dfa1 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -1387,6 +1387,10 @@ class ComputeTestCase(BaseTestCase):
'disk': None}
}).AndRaise(rpc.common.RemoteError('', '', ''))
# mocks for rollback
+ rpc.call(c, 'network', {'method': 'setup_networks_on_host',
+ 'args': {'instance_id': instance_id,
+ 'host': self.compute.host,
+ 'teardown': False}})
rpc.call(c, topic, {"method": "remove_volume_connection",
"args": {'instance_id': instance_id,
'volume_id': volume_id}})
@@ -1455,6 +1459,10 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref, [])
self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, 'network', {'method': 'setup_networks_on_host',
+ 'args': {'instance_id': instance_id,
+ 'host': self.compute.host,
+ 'teardown': True}})
rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
{"method": "post_live_migration_at_destination",
"args": {'instance_id': i_ref['id'], 'block_migration': False}})
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 05b931617..ba9dd6e12 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -838,7 +838,7 @@ class VlanNetworkTestCase(test.TestCase):
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
- self.network.deallocate_fixed_ip(context1, fix_addr)
+ self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)