summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTrey Morris <trey.morris@rackspace.com>2011-04-11 14:16:30 -0500
committerTrey Morris <trey.morris@rackspace.com>2011-04-11 14:16:30 -0500
commit1845c5df145251f1e90709a91cc02ee5ec787e2f (patch)
tree034ab29848268c640bafbb5cfbb0bc63adb1bd32
parent7eedf3f69ca1bbd1f44252fa01fb4f2676735eb2 (diff)
network manager changes, compute changes, various other
-rw-r--r--nova/compute/manager.py83
-rw-r--r--nova/db/api.py3
-rw-r--r--nova/db/sqlalchemy/api.py28
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/014_mac_address_table.py9
-rw-r--r--nova/db/sqlalchemy/models.py3
-rw-r--r--nova/network/api.py6
-rw-r--r--nova/network/manager.py201
-rw-r--r--nova/virt/xenapi/vmops.py10
8 files changed, 209 insertions, 134 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 2c5d958e6..f5bcaf603 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -200,9 +200,9 @@ class ComputeManager(manager.SchedulerDependentManager):
def run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
- instance_ref.injected_files = kwargs.get('injected_files', [])
- if instance_ref['name'] in self.driver.list_instances():
+ instance = self.db.instance_get(context, instance_id)
+ instance.injected_files = kwargs.get('injected_files', [])
+ if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
@@ -215,26 +215,13 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state.NOSTATE,
'networking')
- is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id
- # NOTE(vish): This could be a cast because we don't do anything
- # with the address currently, but I'm leaving it as
- # a call to ensure that network setup completes. We
- # will eventually also need to save the address here.
- #NOTE(tr3buchet): I don't see why we'd save it here when the network
- # manager is saving it.
+ is_vpn = instance['image_id'] == FLAGS.vpn_image_id
if not FLAGS.stub_network:
- rpc.call(context, self.get_network_topic(context),
- {"method": "allocate_fixed_ips",
- "args": {"instance_id": instance_id,
- "vpn": is_vpn}})
- rpc.call(context, self.get_network_topic(context),
- {"method": "allocate_mac_addresses",
- "args": {"instance_id": instance_id}})
-
- nw_info = rpc.call(context, self.get_network_topic(context),
- {"method": "allocate_for_instance",
- "args": {"instance_id": instance_id}})
- Log.debug(_("instance addresses: |%s|"), instance_ref['fixed_ips'])
+ network_info = rpc.call(context, self.get_network_topic(context),
+ {"method": "allocate_for_instance",
+ "args": {"instance_id": instance_id,
+ "vpn": is_vpn}})
+ LOG.debug(_("instance network_info: |%s|"), network_info)
# TODO(vish) check to make sure the availability zone matches
self.db.instance_set_state(context,
@@ -243,7 +230,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'spawning')
try:
- self.driver.spawn(instance_ref)
+ self.driver.spawn(instance, network_info)
now = datetime.datetime.utcnow()
self.db.instance_update(context,
instance_id,
@@ -263,45 +250,22 @@ class ComputeManager(manager.SchedulerDependentManager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
+ instance = self.db.instance_get(context, instance_id)
LOG.audit(_("Terminating instance %s"), instance_id, context=context)
- fixed_ip = instance_ref.get('fixed_ip')
- if not FLAGS.stub_network and fixed_ip:
- floating_ips = fixed_ip.get('floating_ips') or []
- for floating_ip in floating_ips:
- address = floating_ip['address']
- LOG.debug("Disassociating address %s", address,
- context=context)
- # NOTE(vish): Right now we don't really care if the ip is
- # disassociated. We may need to worry about
- # checking this later.
- network_topic = self.db.queue_get_for(context,
- FLAGS.network_topic,
- floating_ip['host'])
- rpc.cast(context,
- network_topic,
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": address}})
-
- address = fixed_ip['address']
- if address:
- LOG.debug(_("Deallocating address %s"), address,
- context=context)
- # NOTE(vish): Currently, nothing needs to be done on the
- # network node until release. If this changes,
- # we will need to cast here.
- self.network_manager.deallocate_fixed_ip(context.elevated(),
- address)
-
- volumes = instance_ref.get('volumes') or []
+ if not FLAGS.stub_network:
+ rpc.call(context, self.get_network_topic(context),
+ {"method": "deallocate_for_instance",
+ "args": {"instance_id": instance_id}})
+
+ volumes = instance.get('volumes') or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
- if instance_ref['state'] == power_state.SHUTOFF:
+ if instance['state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
- self.driver.destroy(instance_ref)
+ self.driver.destroy(instance)
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
@@ -712,10 +676,15 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: inject network info'), instance_id,
context=context)
- self.driver.inject_network_info(instance_ref)
+ instance = self.db.instance_get(context, instance_id)
+ network_info = rpc.call(context, self.get_network_topic(context),
+ {"method": "get_instance_nw_info",
+ "args": {"instance": instance}})
+ LOG.debug(_("network_info: |%s|"), network_info)
+
+ self.driver.inject_network_info(instance, network_info=network_info)
@exception.wrap_exception
def get_console_output(self, context, instance_id):
diff --git a/nova/db/api.py b/nova/db/api.py
index 036caa585..bc146e8f1 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -313,6 +313,7 @@ def migration_get_by_instance_and_status(context, instance_id, status):
return IMPL.migration_get_by_instance_and_status(context, instance_id,
status)
+
####################
@@ -419,6 +420,8 @@ def mac_address_delete(context, mac_address):
def mac_address_delete_by_instance(context, instance_id):
"""delete mac address record in teh database"""
return IMPL.mac_address_delete_by_instance(context, instance_id)
+
+
####################
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e69a5c680..3b9d95752 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -773,17 +773,20 @@ def mac_address_create(context, values):
mac_address_ref = models.MacAddress()
mac_address_ref.update(values)
mac_address_ref.save()
-
- session = get_session()
- with session.begin():
- instance = instance_get(context, instance_id, session=session)
- network = network_get(context, network_id, session=session)
- mac_address.instance = instance
- mac_address.network = network
- mac_address_ref.save(session=session)
- return mac_address_ref
+# instance_id = values['instance_id']
+# network_id = values['network_id']
+#
+# session = get_session()
+# with session.begin():
+# instance = instance_get(context, instance_id, session=session)
+# network = network_get(context, network_id, session=session)
+# mac_address.instance = instance
+# mac_address.network = network
+# mac_address_ref.save(session=session)
+# return mac_address_ref
+@require_context
def mac_address_get(context, mac_address):
"""gets a mac address from the table
@@ -811,7 +814,7 @@ def mac_address_get_all_by_instance(context, instance_id):
return mac_address_refs
-@require_context
+@require_admin_context
def mac_address_get_all_by_network(context, network_id):
"""gets all mac addresses for instance
@@ -840,7 +843,8 @@ def mac_address_delete(context, mac_address):
@require_context
def mac_address_delete_by_instance(context, instance_id):
- """delete mac address record in teh database
+ """delete mac address records in the database that are associated
+ with the instance given by instance_id
context = request context object
instance_id = instance to remove macs for
@@ -1407,6 +1411,8 @@ def network_get_by_cidr(context, cidr):
@require_admin_context
def network_get_by_instance(_context, instance_id):
+ # note this uses fixed IP to get to instance
+ # only works for networks the instance has an IP from
session = get_session()
rv = session.query(models.Network).\
filter_by(deleted=False).\
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/014_mac_address_table.py b/nova/db/sqlalchemy/migrate_repo/versions/014_mac_address_table.py
index b8b57b284..0c482bd71 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/014_mac_address_table.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/014_mac_address_table.py
@@ -27,7 +27,7 @@ mac_addresses = Table('mac_addresses', meta,
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
- Column('mac_address',
+ Column('address',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False),
unique=True),
@@ -62,7 +62,7 @@ def upgrade(migrate_engine):
s = select([instances.c.id, instances.c.mac_address,
fixed_ips.c.network_id],
fixed_ips.c.instance_id == instances.c.id)
- keys = ['instance_id', 'mac_address', 'network_id']
+ keys = ['instance_id', 'address', 'network_id']
join_list = [dict(zip(keys, row)) for row in s.execute()]
logging.info("join list |%s|", join_list)
@@ -72,3 +72,8 @@ def upgrade(migrate_engine):
# drop the mac_address column from instances
c.drop
+
+
+def downgrade(migrate_engine):
+ logging.error(_("Can't downgrade without losing data"))
+ raise Exception
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 42d8c1512..544070aa9 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -209,6 +209,7 @@ class Instance(BASE, NovaBase):
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
+ # aka flavor
instance_type = Column(String(255))
user_data = Column(Text)
@@ -516,7 +517,7 @@ class MacAddress(BASE, NovaBase):
"""Represents a mac address used by an instance"""
__tablename__ = 'mac_addresses'
id = Column(Integer, primary_key=True)
- mac_address = Column(String(255), unique=True)
+ address = Column(String(255), unique=True)
network_id = Column(Integer, ForeignKey('networks.id'), nullable=False)
network = relationship(Network, backref=backref('mac_addresses'))
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
diff --git a/nova/network/api.py b/nova/network/api.py
index c1597103f..4ee1148cb 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -86,9 +86,3 @@ class API(base.Base):
self.db.queue_get_for(context, FLAGS.network_topic, host),
{"method": "disassociate_floating_ip",
"args": {"floating_address": floating_ip['address']}})
-
- def get_instance_network_info(self, context, instance_id):
- """return the network info for an instance"""
- return rpc.call(context, FLAGS.network_topic,
- {"method": "get_instance_network_info",
- "args": {"instance_id": instance_id}})
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 19c0cfbae..c85bbf218 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -162,42 +162,138 @@ class NetworkManager(manager.SchedulerDependentManager):
self._on_set_network_host(context, network_id)
return host
- def allocate_mac_addresses(self, context, instance_id):
- """generates and stores mac addresses"""
+ def _get_networks_for_instance(self, context, instance):
+ """determine which networks an instance should connect to"""
+ # TODO(tr3buchet) maybe this needs to be updated in the future if
+ # there is a better way to determine which networks
+ # a non-vlan instance should connect to
networks = self.db.network_get_all(context)
+ # return only networks which are not vlan networks
+ return [network for network in networks if network['vlan'] is None]
+
+ def allocate_for_instance(self, context, instance_id, **kwargs):
+ """handles allocating the various network resources for an instance"""
+ LOG.debug(_("network allocations for instance %s"), instance_id,
+ context=context)
+ admin_context = context.elevated()
+ instance = self.db.instance_get(context, instance_id)
+ networks = self._get_networks_for_instance(admin_context, instance)
+ self._allocate_mac_addresses(context, instance, networks)
+ self._allocate_fixed_ips(admin_context, instance, networks, **kwargs)
+ return self.get_instance_nw_info(admin_context, instance)
+
+ def deallocate_for_instance(self, context, instance_id, **kwargs):
+ """handles deallocating various network resources for an instance"""
+ LOG.debug(_("network deallocations for instance %s"), instance_id,
+ context=context)
+ instance = self.db.instance_get(context, instance_id)
+
+ # deallocate mac addresses
+ self.db.mac_address_delete_by_instance(context, instance_id)
+
+ # deallocate fixed ips
+ for fixed_ip in instance.fixed_ips:
+ # disassociate floating ips related to fixed_ip
+ for floating_ip in fixed_ip.floating_ips:
+ network_topic = self.db.queue_get_for(context,
+ FLAGS.network_topic,
+ floating_ip['host'])
+ # NOTE(tr3buchet) from vish, may need to check to make sure
+ # disassocate worked in the future
+ rpc.cast(context,
+ network_topic,
+ {'method': 'disassociate_floating_ip',
+ 'args': {'floating_address': floating_ip['address']}})
+ # then deallocate fixed_ip
+ self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs)
+
+ def _allocate_mac_addresses(self, context, instance, networks):
+ """generates and stores mac addresses"""
for network in networks:
- mac_addr_row = {'mac_address': self.generate_mac_address(),
- 'instance_id': instance_id,
- 'network_id': network.id}
+ mac_address = {'address': self.generate_mac_address(),
+ 'instance_id': instance['id'],
+ 'network_id': network['id']}
+ # try 5 times to create a unique mac_address
for i in range(5):
try:
- self.db.mac_address_create(context, mac_addr_row)
- except:
- #TODO(tr3buchet) find the specific exception
- mac_address['mac_address'] = self.generate_mac_address()
+ self.db.mac_address_create(context, mac_address)
+ break
+ except IntegrityError:
+ mac_address['address'] = self.generate_mac_address()
else:
- self.db.mac_address_delete(context, instance_id=instance_id)
+ self.db.mac_address_delete_by_instance(context, instance['id'])
raise exception.MacAddress(_("5 attempts at create failed"))
- def allocate_fixed_ips(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from a host's pool."""
+ def _allocate_fixed_ips(self, context, instance, networks, **kwargs):
+ """calls allocate_fixed_ip once for each network"""
+ for network in networks:
+ self.allocate_fixed_ip(context, instance, network, **kwargs)
+
+ def allocate_fixed_ip(self, context, instance, network, **kwargs):
+ """Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
# network_get_by_compute_host
- networks = self.db.network_get_all(context)
-# network_ref = self.db.network_get_by_bridge(context,
-# FLAGS.flat_network_bridge)
- for network in networks:
- address = self.db.fixed_ip_associate_pool(context.elevated(),
- network.id,
- instance_id)
- self.db.fixed_ip_update(context, address, {'allocated': True})
+ address = self.db.fixed_ip_associate_pool(context.elevated(),
+ network['id'],
+ instance['id'])
+ self.db.fixed_ip_update(context, address, {'allocated': True})
+ return address
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
+ def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
- self.db.fixed_ip_disassociate(context.elevated(), address)
+ self.db.fixed_ip_disassociate(context, address)
+
+ def get_instance_nw_info(self, context, instance):
+ """creates network info list for instance"""
+ # TODO(tr3buchet) should handle floating IPs as well?
+ fixed_ips = self.db.fixed_ip_get_all_by_instance(context,
+ instance['id'])
+ mac_addresses = self.db.mac_address_get_all_by_instance(context,
+ instance['id'])
+ flavor = self.db.instance_type_get_by_name(context,
+ instance['instance_type'])
+ network_info = []
+ # a mac_address contains address, instance_id, network_id
+ # it is also joined to the instance and network given by those IDs
+ for mac_address in mac_addresses:
+ network = mac_address['network']
+
+ # determine which of the instance's IPs belong to this network
+ network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
+ fixed_ip['network_id'] == network['id']]
+
+ # TODO(tr3buchet) eventually "enabled" should be determined
+ def ip_dict(ip):
+ return {
+ "ip": ip,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict():
+ return {
+ "ip": utils.to_global_ipv6(network['cidr_v6'],
+ mac_address['address']),
+ "netmask": network['netmask_v6'],
+ "enabled": "1"}
+
+ info = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'gateway6': network['gateway_v6'],
+ 'broadcast': network['broadcast'],
+ 'mac': mac_address['address'],
+ 'rxtx_cap': flavor['rxtx_cap'],
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_IPs]}
+ if network['cidr_v6']:
+ info['ip6s'] = [ip6_dict()]
+ if network['gateway_v6']:
+ info['gateway6'] = network['gateway_v6']
+ network_info.append((mac_address['network'], info))
+ return network_info
def setup_fixed_ip(self, context, address):
"""Sets up rules for fixed ip."""
@@ -306,13 +402,12 @@ class NetworkManager(manager.SchedulerDependentManager):
return host
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, label, *args, **kwargs):
+ cidr_v6, label, **kwargs):
"""Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
significant_bits_v6 = 64
network_size_v6 = 1 << 64
- count = 1
for index in range(num_networks):
start = index * network_size
start_v6 = index * network_size_v6
@@ -328,10 +423,9 @@ class NetworkManager(manager.SchedulerDependentManager):
net['broadcast'] = str(project_net.broadcast())
net['dhcp_start'] = str(project_net[2])
if num_networks > 1:
- net['label'] = "%s_%d" % (label, count)
+ net['label'] = "%s_%d" % (label, index)
else:
net['label'] = label
- count += 1
if(FLAGS.use_ipv6):
cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
@@ -384,12 +478,6 @@ class NetworkManager(manager.SchedulerDependentManager):
return ':'.join(map(lambda x: "%02x" % x, mac))
-# def setup_new_instance(self, context, instance_id, host):
-# """allocate the network config for a new instance on host"""
-# for network in DB_NETWORKGETALLBYHOST():
-# ip = self.allocate_fixed_ip(context, instance_id, network)
-
-
class FlatManager(NetworkManager):
"""Basic network where no vlans are used.
@@ -477,18 +565,17 @@ class FlatDHCPManager(NetworkManager):
self.driver.ensure_bridge(network_ref['bridge'],
FLAGS.flat_interface)
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
+ def allocate_fixed_ip(self, context, instance, network, **kwargs):
"""Setup dhcp for this network."""
address = super(FlatDHCPManager, self).allocate_fixed_ip(context,
- instance_id,
- *args,
+ instance,
+ network,
**kwargs)
- network_ref = db.fixed_ip_get_network(context, address)
if not FLAGS.fake_network:
- self.driver.update_dhcp(context, network_ref['id'])
+ self.driver.update_dhcp(context, network['id'])
return address
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
+ def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
@@ -528,29 +615,23 @@ class VlanManager(NetworkManager):
super(VlanManager, self).init_host()
self.driver.metadata_forward()
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
+ def allocate_fixed_ip(self, context, instance, network, **kwargs):
"""Gets a fixed ip from the pool."""
- # TODO(vish): This should probably be getting project_id from
- # the instance, but it is another trip to the db.
- # Perhaps this method should take an instance_ref.
- ctxt = context.elevated()
- network_ref = self.db.project_get_network(ctxt,
- context.project_id)
if kwargs.get('vpn', None):
- address = network_ref['vpn_private_address']
- self.db.fixed_ip_associate(ctxt,
+ address = network['vpn_private_address']
+ self.db.fixed_ip_associate(context,
address,
- instance_id)
+ instance['id'])
else:
- address = self.db.fixed_ip_associate_pool(ctxt,
- network_ref['id'],
- instance_id)
+ address = self.db.fixed_ip_associate_pool(context,
+ network['id'],
+ instance['id'])
self.db.fixed_ip_update(context, address, {'allocated': True})
if not FLAGS.fake_network:
- self.driver.update_dhcp(context, network_ref['id'])
+ self.driver.update_dhcp(context, network['id'])
return address
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
+ def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address, {'allocated': False})
@@ -560,8 +641,15 @@ class VlanManager(NetworkManager):
self.driver.ensure_vlan_bridge(network_ref['vlan'],
network_ref['bridge'])
+ def _get_networks_for_instance(self, context, instance):
+ """determine which networks an instance should connect to"""
+ # get network associated with project
+ # TODO(tr3buchet): currently there can be only one, but this should
+ # change. when it does this should be project_get_networks
+ return [self.db.project_get_network(context, instance['project_id'])]
+
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, vlan_start, vpn_start, **kwargs):
+ cidr_v6, vlan_start, vpn_start, label, **kwargs):
"""Create networks based on parameters."""
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
if num_networks + vlan_start > 4094:
@@ -594,6 +682,11 @@ class VlanManager(NetworkManager):
net['dhcp_start'] = str(project_net[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
+ if num_networks > 1:
+ net['label'] = "%s_%d" % (label, index)
+ else:
+ net['label'] = label
+
if(FLAGS.use_ipv6):
cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
significant_bits_v6)
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 0235e2dc4..89860c5e2 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -807,7 +807,6 @@ class VMOps(object):
"ip": utils.to_global_ipv6(network['cidr_v6'],
instance['mac_address']),
"netmask": network['netmask_v6'],
- "gateway": network['gateway_v6'],
"enabled": "1"}
info = {
@@ -820,10 +819,12 @@ class VMOps(object):
'ips': [ip_dict(ip) for ip in network_IPs]}
if network['cidr_v6']:
info['ip6s'] = [ip6_dict(ip) for ip in network_IPs]
+ if network['gateway_v6']:
+ info['gateway6'] = network['gateway_v6']
network_info.append((network, info))
return network_info
- def inject_network_info(self, instance, vm_ref, network_info):
+ def inject_network_info(self, instance, vm_ref=None, network_info=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list
@@ -831,7 +832,10 @@ class VMOps(object):
logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
# this function raises if vm_ref is not a vm_opaque_ref
- self._session.get_xenapi().VM.get_record(vm_ref)
+ if vm_ref:
+ self._session.get_xenapi().VM.get_record(vm_ref)
+ else:
+ vm_ref = VMHelper.lookup(self._session, instance.name)
for (network, info) in network_info:
location = 'vm-data/networking/%s' % info['mac'].replace(':', '')