summaryrefslogtreecommitdiffstats
path: root/nova/compute
diff options
context:
space:
mode:
authorTrey Morris <treyemorris@gmail.com>2012-01-09 11:52:53 -0600
committerTrey Morris <treyemorris@gmail.com>2012-02-01 13:29:14 -0600
commit73fd7abacd3bc5492b0335b3bb71c16b4a9d30e2 (patch)
tree571ee661373cfc18b1ca1135e2b8c36d6758c641 /nova/compute
parentfced0f58bcbaef6fff76c6719e27e7d100aa721b (diff)
Ties quantum, melange, and nova network model
get_instance_nw_info() now returns network model, and keeps the network info cache up to date. virt shim and translation in place for virts to get at the old stuff Change-Id: I070ea7d8564af6c644059d1c209542d250d19ddb
Diffstat (limited to 'nova/compute')
-rw-r--r--nova/compute/api.py41
-rw-r--r--nova/compute/manager.py91
-rw-r--r--nova/compute/utils.py118
3 files changed, 198 insertions, 52 deletions
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 622a1abfd..42c4ea4f0 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1687,25 +1687,30 @@ class API(base.Base):
# in its info, if this changes, the next few lines will need to
# accommodate the info containing floating as well as fixed ip
# addresses
- fixed_ip_addrs = []
- for info in self.network_api.get_instance_nw_info(context.elevated(),
- instance):
- ips = info[1]['ips']
- fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips])
-
- # TODO(tr3buchet): this will associate the floating IP with the first
- # fixed_ip (lowest id) an instance has. This should be changed to
- # support specifying a particular fixed_ip if multiple exist.
- if not fixed_ip_addrs:
- msg = _("instance |%s| has no fixed_ips. "
- "unable to associate floating ip") % instance_uuid
- raise exception.ApiError(msg)
- if len(fixed_ip_addrs) > 1:
- LOG.warning(_("multiple fixed_ips exist, using the first: %s"),
- fixed_ip_addrs[0])
- self.network_api.associate_floating_ip(context,
+
+ fail_bag = _('instance |%s| has no fixed ips. '
+ 'unable to associate floating ip') % instance_uuid
+
+ nw_info = self.network_api.get_instance_nw_info(context.elevated(),
+ instance)
+
+ if nw_info:
+ ips = [ip for ip in nw_info[0].fixed_ips()]
+
+            # TODO(tr3buchet): this will associate the floating IP with the
+            # first fixed_ip (lowest id) an instance has. This should be
+            # changed to support specifying a particular fixed_ip if
+            # multiple exist.
+ if not ips:
+ raise exception.ApiError(fail_bag)
+ if len(ips) > 1:
+ LOG.warning(_('multiple fixedips exist, using the first: %s'),
+ ips[0]['address'])
+ self.network_api.associate_floating_ip(context,
floating_address=address,
- fixed_address=fixed_ip_addrs[0])
+ fixed_address=ips[0]['address'])
+ return
+ raise exception.ApiError(fail_bag)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index ef49718dc..cb22ff8e7 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -49,7 +49,7 @@ from nova.common import cfg
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
-from nova.compute.utils import notify_usage_exists
+from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
from nova import flags
@@ -57,6 +57,7 @@ import nova.image
from nova import log as logging
from nova import manager
from nova import network
+from nova.network import model as network_model
from nova.notifier import api as notifier
from nova import rpc
from nova import utils
@@ -227,7 +228,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
net_info = self._get_instance_nw_info(context, instance)
self.driver.ensure_filtering_rules_for_instance(instance,
- net_info)
+ self._legacy_nw_info(net_info))
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not '
'support firewall rules'))
@@ -282,10 +283,18 @@ class ComputeManager(manager.SchedulerDependentManager):
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance.
Returns an empty list if stub_network flag is set."""
- network_info = []
- if not FLAGS.stub_network:
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ if FLAGS.stub_network:
+ return network_model.NetworkInfo()
+
+ # get the network info from network
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ return network_info
+
+ def _legacy_nw_info(self, network_info):
+ """Converts the model nw_info object to legacy style"""
+ if self.driver.legacy_nwinfo():
+ network_info = compute_utils.legacy_network_info(network_info)
return network_info
def _setup_block_device_mapping(self, context, instance):
@@ -489,12 +498,13 @@ class ComputeManager(manager.SchedulerDependentManager):
if FLAGS.stub_network:
msg = _("Skipping network allocation for instance %s")
LOG.debug(msg % instance['uuid'])
- return []
+ return network_model.NetworkInfo()
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING)
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
+ # allocate and get network info
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks)
@@ -502,7 +512,9 @@ class ComputeManager(manager.SchedulerDependentManager):
msg = _("Instance %s failed network setup")
LOG.exception(msg % instance['uuid'])
raise
+
LOG.debug(_("instance network_info: |%s|"), network_info)
+
return network_info
def _prep_block_device(self, context, instance):
@@ -527,7 +539,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance['admin_pass'] = admin_pass
try:
self.driver.spawn(context, instance, image_meta,
- network_info, block_device_info)
+ self._legacy_nw_info(network_info), block_device_info)
except Exception:
msg = _("Instance %s failed to spawn")
LOG.exception(msg % instance['uuid'])
@@ -606,9 +618,10 @@ class ComputeManager(manager.SchedulerDependentManager):
{'action_str': action_str, 'instance_uuid': instance_uuid},
context=context)
+ # get network info before tearing down
network_info = self._get_instance_nw_info(context, instance)
- if not FLAGS.stub_network:
- self.network_api.deallocate_for_instance(context, instance)
+ # tear down allocated network structure
+ self._deallocate_network(context, instance)
if instance['power_state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
@@ -618,7 +631,8 @@ class ComputeManager(manager.SchedulerDependentManager):
bdms = self._get_instance_volume_bdms(context, instance_id)
block_device_info = self._get_instance_volume_block_device_info(
context, instance_id)
- self.driver.destroy(instance, network_info, block_device_info)
+ self.driver.destroy(instance, self._legacy_nw_info(network_info),
+ block_device_info)
for bdm in bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
@@ -663,7 +677,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Terminate an instance on this host."""
elevated = context.elevated()
instance = self.db.instance_get_by_uuid(elevated, instance_uuid)
- notify_usage_exists(instance, current_period=True)
+ compute_utils.notify_usage_exists(instance, current_period=True)
self._delete_instance(context, instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -732,7 +746,7 @@ class ComputeManager(manager.SchedulerDependentManager):
task_state=None)
network_info = self._get_instance_nw_info(context, instance)
- self.driver.destroy(instance, network_info)
+ self.driver.destroy(instance, self._legacy_nw_info(network_info))
self._instance_update(context,
instance_uuid,
@@ -755,7 +769,7 @@ class ComputeManager(manager.SchedulerDependentManager):
image_meta = _get_image_meta(context, instance['image_ref'])
self.driver.spawn(context, instance, image_meta,
- network_info, device_info)
+ self._legacy_nw_info(network_info), device_info)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
@@ -794,7 +808,8 @@ class ComputeManager(manager.SchedulerDependentManager):
context=context)
network_info = self._get_instance_nw_info(context, instance)
- self.driver.reboot(instance, network_info, reboot_type)
+ self.driver.reboot(instance, self._legacy_nw_info(network_info),
+ reboot_type)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
@@ -1026,7 +1041,8 @@ class ComputeManager(manager.SchedulerDependentManager):
image_meta = _get_image_meta(context, instance_ref['image_ref'])
with self.error_out_instance_on_exception(context, instance_uuid):
- self.driver.rescue(context, instance_ref, network_info, image_meta)
+ self.driver.rescue(context, instance_ref,
+ self._legacy_nw_info(network_info), image_meta)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
@@ -1047,7 +1063,8 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self._get_instance_nw_info(context, instance_ref)
with self.error_out_instance_on_exception(context, instance_uuid):
- self.driver.unrescue(instance_ref, network_info)
+ self.driver.unrescue(instance_ref,
+ self._legacy_nw_info(network_info))
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
@@ -1069,8 +1086,8 @@ class ComputeManager(manager.SchedulerDependentManager):
"resize.confirm.start")
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.confirm_migration(
- migration_ref, instance_ref, network_info)
+ self.driver.confirm_migration(migration_ref, instance_ref,
+ self._legacy_nw_info(network_info))
self._notify_about_instance_usage(instance_ref, "resize.confirm.end",
network_info=network_info)
@@ -1090,7 +1107,7 @@ class ComputeManager(manager.SchedulerDependentManager):
migration_ref.instance_uuid)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.destroy(instance_ref, network_info)
+ self.driver.destroy(instance_ref, self._legacy_nw_info(network_info))
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
migration_ref['source_compute'])
rpc.cast(context, topic,
@@ -1267,8 +1284,9 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
self.driver.finish_migration(context, migration_ref, instance_ref,
- disk_info, network_info, image_meta,
- resize_instance)
+ disk_info,
+ self._legacy_nw_info(network_info),
+ image_meta, resize_instance)
except Exception, error:
with utils.save_and_reraise_exception():
msg = _('%s. Setting instance vm_state to ERROR')
@@ -1477,7 +1495,8 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_("network_info to inject: |%s|"), network_info)
- self.driver.inject_network_info(instance, network_info)
+ self.driver.inject_network_info(instance,
+ self._legacy_nw_info(network_info))
return network_info
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1753,14 +1772,16 @@ class ComputeManager(manager.SchedulerDependentManager):
# concurrent request occurs to iptables, then it complains.
network_info = self._get_instance_nw_info(context, instance_ref)
- fixed_ips = [nw_info[1]['ips'] for nw_info in network_info]
+ # TODO(tr3buchet): figure out how on the earth this is necessary
+ fixed_ips = network_info.fixed_ips()
if not fixed_ips:
raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
max_retry = FLAGS.live_migration_retry_count
for cnt in range(max_retry):
try:
- self.driver.plug_vifs(instance_ref, network_info)
+ self.driver.plug_vifs(instance_ref,
+ self._legacy_nw_info(network_info))
break
except exception.ProcessExecutionError:
if cnt == max_retry - 1:
@@ -1778,7 +1799,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# In addition, this method is creating filtering rule
# onto destination host.
self.driver.ensure_filtering_rules_for_instance(instance_ref,
- network_info)
+ self._legacy_nw_info(network_info))
# Preparation for block migration
if block_migration:
@@ -1868,7 +1889,8 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
- self.driver.unfilter_instance(instance_ref, network_info)
+ self.driver.unfilter_instance(instance_ref,
+ self._legacy_nw_info(network_info))
# Database updating.
# NOTE(jkoelker) This needs to be converted to network api calls
@@ -1918,13 +1940,15 @@ class ComputeManager(manager.SchedulerDependentManager):
# No instance booting at source host, but instance dir
# must be deleted for preparing next block migration
if block_migration:
- self.driver.destroy(instance_ref, network_info)
+ self.driver.destroy(instance_ref,
+ self._legacy_nw_info(network_info))
else:
# self.driver.destroy() usually performs vif unplugging
# but we must do it explicitly here when block_migration
# is false, as the network devices at the source must be
# torn down
- self.driver.unplug_vifs(instance_ref, network_info)
+ self.driver.unplug_vifs(instance_ref,
+ self._legacy_nw_info(network_info))
LOG.info(_('Migrating %(instance_uuid)s to %(dest)s finished'
' successfully.') % locals())
@@ -1945,10 +1969,9 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.info(_('Post operation of migraton started for %s .')
% instance_ref['uuid'])
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.post_live_migration_at_destination(context,
- instance_ref,
- network_info,
- block_migration)
+ self.driver.post_live_migration_at_destination(context, instance_ref,
+ self._legacy_nw_info(network_info),
+ block_migration)
def rollback_live_migration(self, context, instance_ref,
dest, block_migration):
@@ -2000,7 +2023,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# from remote volumes if necessary
block_device_info = \
self._get_instance_volume_block_device_info(context, instance_id)
- self.driver.destroy(instance_ref, network_info,
+ self.driver.destroy(instance_ref, self._legacy_nw_info(network_info),
block_device_info)
@manager.periodic_task
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 922a9c761..b8b34fa81 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -16,8 +16,11 @@
"""Compute-related Utilities and helpers."""
+import netaddr
+
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova.notifier import api as notifier_api
from nova import utils
@@ -53,3 +56,118 @@ def notify_usage_exists(instance_ref, current_period=False):
'compute.instance.exists',
notifier_api.INFO,
usage_info)
+
+
+def legacy_network_info(network_model):
+ """
+ Return the legacy network_info representation of the network_model
+ """
+ def get_ip(ip):
+ if not ip:
+ return None
+ return ip['address']
+
+ def fixed_ip_dict(ip, subnet):
+ if ip['version'] == 4:
+ netmask = str(subnet.as_netaddr().netmask)
+ else:
+ netmask = subnet.as_netaddr()._prefixlen
+
+ return {'ip': ip['address'],
+ 'enabled': '1',
+ 'netmask': netmask,
+ 'gateway': get_ip(subnet['gateway'])}
+
+ def get_meta(model, key, default=None):
+ if 'meta' in model and key in model['meta']:
+ return model['meta'][key]
+ return default
+
+ def convert_routes(routes):
+ routes_list = []
+ for route in routes:
+ r = {'route': str(netaddr.IPNetwork(route['cidr']).network),
+ 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
+ 'gateway': get_ip(route['gateway'])}
+ routes_list.append(r)
+ return routes_list
+
+ network_info = []
+ for vif in network_model:
+ if not vif['network'] or not vif['network']['subnets']:
+ continue
+ network = vif['network']
+
+ # NOTE(jkoelker) The legacy format only supports one subnet per
+ # network, so we only use the 1st one of each type
+ # NOTE(tr3buchet): o.O
+ v4_subnets = []
+ v6_subnets = []
+ for subnet in vif['network']['subnets']:
+ if subnet['version'] == 4:
+ v4_subnets.append(subnet)
+ else:
+ v6_subnets.append(subnet)
+
+ subnet_v4 = None
+ subnet_v6 = None
+
+ if v4_subnets:
+ subnet_v4 = v4_subnets[0]
+
+ if v6_subnets:
+ subnet_v6 = v6_subnets[0]
+
+ if not subnet_v4:
+ raise exception.NovaException(
+ message=_('v4 subnets are required for legacy nw_info'))
+
+ routes = convert_routes(subnet_v4['routes'])
+
+ should_create_bridge = get_meta(network, 'should_create_bridge',
+ False)
+ should_create_vlan = get_meta(network, 'should_create_vlan', False)
+ gateway = get_ip(subnet_v4['gateway'])
+ dhcp_server = get_meta(subnet_v4, 'dhcp_server', gateway)
+ network_dict = dict(bridge=network['bridge'],
+ id=network['id'],
+ cidr=subnet_v4['cidr'],
+ cidr_v6=subnet_v6['cidr'] if subnet_v6 else None,
+ vlan=get_meta(network, 'vlan'),
+ injected=get_meta(network, 'injected', False),
+ multi_host=get_meta(network, 'multi_host',
+ False),
+ bridge_interface=get_meta(network,
+ 'bridge_interface'))
+ # NOTE(tr3buchet): the 'ips' bit here is tricky, we support a single
+ # subnet but we want all the IPs to be there
+ # so we use the v4_subnets[0] and its IPs are first
+ # so that eth0 will be from subnet_v4, the rest of the
+ # IPs will be aliased eth0:1 etc and the gateways from
+ # their subnets will not be used
+ info_dict = dict(label=network['label'],
+ broadcast=str(subnet_v4.as_netaddr().broadcast),
+ mac=vif['address'],
+ vif_uuid=vif['id'],
+ rxtx_cap=get_meta(network, 'rxtx_cap', 0),
+ dns=[get_ip(ip) for ip in subnet['dns']],
+ ips=[fixed_ip_dict(ip, subnet)
+ for subnet in v4_subnets
+ for ip in subnet['ips']],
+ should_create_bridge=should_create_bridge,
+ should_create_vlan=should_create_vlan,
+ dhcp_server=dhcp_server)
+ if routes:
+ info_dict['routes'] = routes
+
+ if gateway:
+ info_dict['gateway'] = gateway
+
+ if v6_subnets:
+ if subnet_v6['gateway']:
+ info_dict['gateway_v6'] = get_ip(subnet_v6['gateway'])
+ info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6)
+ for ip in subnet_v6['ips']]
+
+ network_info.append((network_dict, info_dict))
+ return network_info