Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/baremetal/base.py                                                       2
-rw-r--r--  nova/virt/baremetal/db/api.py                                                    36
-rw-r--r--  nova/virt/baremetal/db/sqlalchemy/api.py                                        108
-rw-r--r--  nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/008_remove_bm_pxe_ips_table.py  61
-rw-r--r--  nova/virt/baremetal/db/sqlalchemy/models.py                                       9
-rwxr-xr-x  nova/virt/baremetal/driver.py                                                     9
-rw-r--r--  nova/virt/baremetal/fake.py                                                       2
-rw-r--r--  nova/virt/baremetal/pxe.py                                                       58
-rw-r--r--  nova/virt/baremetal/pxe_config.template                                           2
-rwxr-xr-x  nova/virt/baremetal/tilera.py                                                    32
-rwxr-xr-x  nova/virt/baremetal/tilera_pdu.py                                                 2
-rw-r--r--  nova/virt/baremetal/utils.py                                                     29
-rw-r--r--  nova/virt/baremetal/virtual_power_driver.py                                       2
-rw-r--r--  nova/virt/baremetal/volume_driver.py                                             11
-rw-r--r--  nova/virt/configdrive.py                                                          4
-rw-r--r--  nova/virt/disk/vfs/localfs.py                                                     8
-rwxr-xr-x  nova/virt/driver.py                                                               3
-rwxr-xr-x  nova/virt/fake.py                                                                 2
-rwxr-xr-x  nova/virt/hyperv/driver.py                                                        3
-rw-r--r--  nova/virt/hyperv/vmutils.py                                                       2
-rwxr-xr-x  nova/virt/images.py                                                               5
-rw-r--r--  nova/virt/libvirt/blockinfo.py                                                    2
-rwxr-xr-x  nova/virt/libvirt/driver.py                                                     357
-rw-r--r--  nova/virt/libvirt/firewall.py                                                     9
-rw-r--r--  nova/virt/libvirt/vif.py                                                        152
-rwxr-xr-x  nova/virt/powervm/driver.py                                                       3
-rw-r--r--  nova/virt/powervm/lpar.py                                                         2
-rw-r--r--  nova/virt/powervm/operator.py                                                     4
-rwxr-xr-x  nova/virt/vmwareapi/driver.py                                                    32
-rw-r--r--  nova/virt/vmwareapi/fake.py                                                     123
-rw-r--r--  nova/virt/vmwareapi/vm_util.py                                                  110
-rw-r--r--  nova/virt/vmwareapi/vmops.py                                                     36
-rw-r--r--  nova/virt/xenapi/agent.py                                                       174
-rwxr-xr-x  nova/virt/xenapi/driver.py                                                        3
-rw-r--r--  nova/virt/xenapi/fake.py                                                          9
-rw-r--r--  nova/virt/xenapi/vm_utils.py                                                     32
-rw-r--r--  nova/virt/xenapi/volume_utils.py                                                 12
37 files changed, 895 insertions, 555 deletions
diff --git a/nova/virt/baremetal/base.py b/nova/virt/baremetal/base.py
index 2029400ba..876c70b23 100644
--- a/nova/virt/baremetal/base.py
+++ b/nova/virt/baremetal/base.py
@@ -30,7 +30,7 @@ class NodeDriver(object):
def destroy_images(self, context, node, instance):
raise NotImplementedError()
- def activate_bootloader(self, context, node, instance):
+ def activate_bootloader(self, context, node, instance, **kwargs):
raise NotImplementedError()
def deactivate_bootloader(self, context, node, instance):
diff --git a/nova/virt/baremetal/db/api.py b/nova/virt/baremetal/db/api.py
index 91edc05d9..3943b7902 100644
--- a/nova/virt/baremetal/db/api.py
+++ b/nova/virt/baremetal/db/api.py
@@ -121,42 +121,6 @@ def bm_node_associate_and_update(context, node_uuid, values):
return IMPL.bm_node_associate_and_update(context, node_uuid, values)
-def bm_pxe_ip_create(context, address, server_address):
- return IMPL.bm_pxe_ip_create(context, address, server_address)
-
-
-def bm_pxe_ip_create_direct(context, bm_pxe_ip):
- return IMPL.bm_pxe_ip_create_direct(context, bm_pxe_ip)
-
-
-def bm_pxe_ip_destroy(context, ip_id):
- return IMPL.bm_pxe_ip_destroy(context, ip_id)
-
-
-def bm_pxe_ip_destroy_by_address(context, address):
- return IMPL.bm_pxe_ip_destroy_by_address(context, address)
-
-
-def bm_pxe_ip_get_all(context):
- return IMPL.bm_pxe_ip_get_all(context)
-
-
-def bm_pxe_ip_get(context, ip_id):
- return IMPL.bm_pxe_ip_get(context, ip_id)
-
-
-def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
- return IMPL.bm_pxe_ip_get_by_bm_node_id(context, bm_node_id)
-
-
-def bm_pxe_ip_associate(context, bm_node_id):
- return IMPL.bm_pxe_ip_associate(context, bm_node_id)
-
-
-def bm_pxe_ip_disassociate(context, bm_node_id):
- return IMPL.bm_pxe_ip_disassociate(context, bm_node_id)
-
-
def bm_interface_get(context, if_id):
return IMPL.bm_interface_get(context, if_id)
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
index 88d44e3d3..3c140556e 100644
--- a/nova/virt/baremetal/db/sqlalchemy/api.py
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -236,111 +236,6 @@ def bm_node_destroy(context, bm_node_id):
@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_get_all(context):
- query = model_query(context, models.BareMetalPxeIp, read_deleted="no")
- return query.all()
-
-
-@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_create(context, address, server_address):
- ref = models.BareMetalPxeIp()
- ref.address = address
- ref.server_address = server_address
- _save(ref)
- return ref
-
-
-@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_create_direct(context, bm_pxe_ip):
- ref = bm_pxe_ip_create(context,
- address=bm_pxe_ip['address'],
- server_address=bm_pxe_ip['server_address'])
- return ref
-
-
-@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_destroy(context, ip_id):
- # Delete physically since it has unique columns
- model_query(context, models.BareMetalPxeIp, read_deleted="no").\
- filter_by(id=ip_id).\
- delete()
-
-
-@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_destroy_by_address(context, address):
- # Delete physically since it has unique columns
- model_query(context, models.BareMetalPxeIp, read_deleted="no").\
- filter_by(address=address).\
- delete()
-
-
-@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_get(context, ip_id):
- result = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
- filter_by(id=ip_id).\
- first()
-
- return result
-
-
-@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
- result = model_query(context, models.BareMetalPxeIp, read_deleted="no").\
- filter_by(bm_node_id=bm_node_id).\
- first()
-
- if not result:
- raise exception.NodeNotFound(node_id=bm_node_id)
-
- return result
-
-
-@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_associate(context, bm_node_id):
- session = db_session.get_session()
- with session.begin():
- # Check if the node really exists
- node_ref = model_query(context, models.BareMetalNode,
- read_deleted="no", session=session).\
- filter_by(id=bm_node_id).\
- first()
- if not node_ref:
- raise exception.NodeNotFound(node_id=bm_node_id)
-
- # Check if the node already has a pxe_ip
- ip_ref = model_query(context, models.BareMetalPxeIp,
- read_deleted="no", session=session).\
- filter_by(bm_node_id=bm_node_id).\
- first()
- if ip_ref:
- return ip_ref.id
-
- # with_lockmode('update') and filter_by(bm_node_id=None) will lock all
- # records. It may cause a performance problem in high-concurrency
- # environment.
- ip_ref = model_query(context, models.BareMetalPxeIp,
- read_deleted="no", session=session).\
- filter_by(bm_node_id=None).\
- with_lockmode('update').\
- first()
-
- # this exception is not caught in nova/compute/manager
- if not ip_ref:
- raise exception.NovaException(_("No more PXE IPs available"))
-
- ip_ref.bm_node_id = bm_node_id
- session.add(ip_ref)
- return ip_ref.id
-
-
-@sqlalchemy_api.require_admin_context
-def bm_pxe_ip_disassociate(context, bm_node_id):
- model_query(context, models.BareMetalPxeIp, read_deleted="no").\
- filter_by(bm_node_id=bm_node_id).\
- update({'bm_node_id': None})
-
-
-@sqlalchemy_api.require_admin_context
def bm_interface_get(context, if_id):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
@@ -402,8 +297,7 @@ def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
if str(e).find('IntegrityError') != -1:
raise exception.NovaException(_("Baremetal interface %s "
"already in use") % vif_uuid)
- else:
- raise e
+ raise
@sqlalchemy_api.require_admin_context
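Note on the change above: under Python 2, which Nova targeted at the time, "raise e" re-raises with a fresh traceback starting at the re-raise site, while a bare "raise" preserves the traceback of the original failure. A minimal sketch (hypothetical function names):

    def fail():
        raise ValueError("boom")

    def reraise_bound():
        try:
            fail()
        except ValueError as e:
            raise e     # Python 2: traceback now points here, not at fail()

    def reraise_bare():
        try:
            fail()
        except ValueError:
            raise       # the traceback into fail() is preserved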
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/008_remove_bm_pxe_ips_table.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/008_remove_bm_pxe_ips_table.py
new file mode 100644
index 000000000..22b45c480
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/008_remove_bm_pxe_ips_table.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Mirantis Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean
+from sqlalchemy import Column
+from sqlalchemy import DateTime
+from sqlalchemy import Index
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+
+
+table_name = 'bm_pxe_ips'
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ table = Table(table_name, meta, autoload=True)
+ table.drop()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ bm_pxe_ips = Table(table_name, meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('address', String(length=255), unique=True),
+ Column('bm_node_id', Integer),
+ Column('server_address',
+ String(length=255), unique=True),
+ mysql_engine='InnoDB',
+ )
+ bm_pxe_ips.create()
+
+ Index(
+ 'idx_bm_pxe_ips_bm_node_id_deleted',
+ bm_pxe_ips.c.bm_node_id,
+ bm_pxe_ips.c.deleted
+ ).create(migrate_engine)
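Note: like the other scripts in migrate_repo, this migration is driven by sqlalchemy-migrate. A minimal sketch of exercising both directions by hand, assuming a throwaway SQLite URL and that 007 is the previous schema head (both assumptions):

    from migrate.versioning import api as versioning_api

    DB_URL = 'sqlite:///baremetal_test.sqlite'   # hypothetical test database
    REPO = 'nova/virt/baremetal/db/sqlalchemy/migrate_repo'

    # Walk the schema forward to version 008, dropping bm_pxe_ips.
    versioning_api.upgrade(DB_URL, REPO, 8)

    # Step back one version; downgrade() recreates the table and its index.
    versioning_api.downgrade(DB_URL, REPO, 7)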
diff --git a/nova/virt/baremetal/db/sqlalchemy/models.py b/nova/virt/baremetal/db/sqlalchemy/models.py
index dbc9386ec..e21999634 100644
--- a/nova/virt/baremetal/db/sqlalchemy/models.py
+++ b/nova/virt/baremetal/db/sqlalchemy/models.py
@@ -54,15 +54,6 @@ class BareMetalNode(BASE, models.NovaBase):
swap_mb = Column(Integer)
-class BareMetalPxeIp(BASE, models.NovaBase):
- __tablename__ = 'bm_pxe_ips'
- id = Column(Integer, primary_key=True)
- deleted = Column(Boolean, default=False)
- address = Column(String(255), unique=True)
- server_address = Column(String(255), unique=True)
- bm_node_id = Column(Integer, ForeignKey('bm_nodes.id'), nullable=True)
-
-
class BareMetalInterface(BASE, models.NovaBase):
__tablename__ = 'bm_interfaces'
id = Column(Integer, primary_key=True)
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 376921360..703cc0980 100755
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -248,8 +248,10 @@ class BareMetalDriver(driver.ComputeDriver):
injected_files=injected_files,
network_info=network_info,
)
- self.driver.activate_bootloader(context, node, instance)
- self.power_on(instance, node)
+ self.driver.activate_bootloader(context, node, instance,
+ network_info=network_info)
+ self.power_on(context, instance, network_info, block_device_info,
+ node)
self.driver.activate_node(context, node, instance)
_update_state(context, node, instance, baremetal_states.ACTIVE)
except Exception:
@@ -330,7 +332,8 @@ class BareMetalDriver(driver.ComputeDriver):
"for instance %r") % instance['uuid'])
pm.stop_console()
- def power_on(self, instance, node=None):
+ def power_on(self, context, instance, network_info, block_device_info=None,
+ node=None):
"""Power on the specified instance."""
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
diff --git a/nova/virt/baremetal/fake.py b/nova/virt/baremetal/fake.py
index b3f39fdc3..76586ab74 100644
--- a/nova/virt/baremetal/fake.py
+++ b/nova/virt/baremetal/fake.py
@@ -28,7 +28,7 @@ class FakeDriver(base.NodeDriver):
def destroy_images(self, context, node, instance):
pass
- def activate_bootloader(self, context, node, instance):
+ def activate_bootloader(self, context, node, instance, **kwargs):
pass
def deactivate_bootloader(self, context, node, instance):
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 6a5a5ece5..21fc2ce47 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -54,6 +54,10 @@ pxe_opts = [
cfg.IntOpt('pxe_deploy_timeout',
help='Timeout for PXE deployments. Default: 0 (unlimited)',
default=0),
+ cfg.BoolOpt('pxe_network_config',
+ help='If set, pass the network configuration details to the '
+ 'initramfs via cmdline.',
+ default=False),
]
LOG = logging.getLogger(__name__)
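Note: the new flag is registered in the existing [baremetal] option group (it is read back as CONF.baremetal.pxe_network_config below), so enabling it in nova.conf looks like:

    [baremetal]
    pxe_network_config = true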
@@ -77,9 +81,22 @@ def _get_cheetah():
return CHEETAH
+def build_pxe_network_config(network_info):
+ interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
+ template = None
+ if not CONF.use_ipv6:
+ template = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off"
+ else:
+ template = ("ip=[%(address_v6)s]::[%(gateway_v6)s]:"
+ "[%(netmask_v6)s]::%(name)s:off")
+
+ net_config = [template % iface for iface in interfaces]
+ return ' '.join(net_config)
+
+
def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
deployment_aki_path, deployment_ari_path,
- aki_path, ari_path):
+ aki_path, ari_path, network_info):
"""Build the PXE config file for a node
This method builds the PXE boot configuration file for a node,
@@ -90,6 +107,11 @@ def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
"""
LOG.debug(_("Building PXE config for deployment %s.") % deployment_id)
+
+ network_config = None
+ if network_info and CONF.baremetal.pxe_network_config:
+ network_config = build_pxe_network_config(network_info)
+
pxe_options = {
'deployment_id': deployment_id,
'deployment_key': deployment_key,
@@ -99,6 +121,7 @@ def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
'aki_path': aki_path,
'ari_path': ari_path,
'pxe_append_params': CONF.baremetal.pxe_append_params,
+ 'pxe_network_config': network_config,
}
cheetah = _get_cheetah()
pxe_config = str(cheetah(
@@ -110,33 +133,7 @@ def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
def build_network_config(network_info):
- # TODO(deva): fix assumption that device names begin with "eth"
- # and fix assumption about ordering
- try:
- assert isinstance(network_info, list)
- except AssertionError:
- network_info = [network_info]
- interfaces = []
- for id, (network, mapping) in enumerate(network_info):
- address_v6 = None
- gateway_v6 = None
- netmask_v6 = None
- if CONF.use_ipv6:
- address_v6 = mapping['ip6s'][0]['ip']
- netmask_v6 = mapping['ip6s'][0]['netmask']
- gateway_v6 = mapping['gateway_v6']
- interface = {
- 'name': 'eth%d' % id,
- 'address': mapping['ips'][0]['ip'],
- 'gateway': mapping['gateway'],
- 'netmask': mapping['ips'][0]['netmask'],
- 'dns': ' '.join(mapping['dns']),
- 'address_v6': address_v6,
- 'gateway_v6': gateway_v6,
- 'netmask_v6': netmask_v6,
- }
- interfaces.append(interface)
-
+ interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
cheetah = _get_cheetah()
network_config = str(cheetah(
open(CONF.baremetal.net_config_template).read(),
@@ -216,7 +213,7 @@ def get_tftp_image_info(instance, instance_type):
image_info['ramdisk'][0] = str(instance['ramdisk_id'])
image_info['deploy_kernel'][0] = get_deploy_aki_id(instance_type)
image_info['deploy_ramdisk'][0] = get_deploy_ari_id(instance_type)
- except KeyError as e:
+ except KeyError:
pass
missing_labels = []
@@ -354,7 +351,7 @@ class PXE(base.NodeDriver):
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.rmtree_without_raise(get_image_dir_path(instance))
- def activate_bootloader(self, context, node, instance):
+ def activate_bootloader(self, context, node, instance, network_info):
"""Configure PXE boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
@@ -398,6 +395,7 @@ class PXE(base.NodeDriver):
image_info['deploy_ramdisk'][1],
image_info['kernel'][1],
image_info['ramdisk'][1],
+ network_info,
)
bm_utils.write_to_file(pxe_config_file_path, pxe_config)
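Note: with pxe_network_config enabled, build_pxe_network_config() emits one "ip=" argument per interface via dict interpolation. A quick sketch with a made-up IPv4 interface of the shape map_network_interfaces() returns:

    iface = {'name': 'eth0', 'address': '10.0.0.2', 'gateway': '10.0.0.1',
             'netmask': '255.255.255.0'}
    template = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off"
    print(template % iface)
    # ip=10.0.0.2::10.0.0.1:255.255.255.0::eth0:off

This follows the kernel's ip=<client>:<server>:<gateway>:<netmask>:<hostname>:<device>:<autoconf> cmdline syntax, with the server and hostname fields left empty and autoconfiguration turned off.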
diff --git a/nova/virt/baremetal/pxe_config.template b/nova/virt/baremetal/pxe_config.template
index f2fcc9b14..54dd98baf 100644
--- a/nova/virt/baremetal/pxe_config.template
+++ b/nova/virt/baremetal/pxe_config.template
@@ -8,4 +8,4 @@ ipappend 3
label boot
kernel ${pxe_options.aki_path}
-append initrd=${pxe_options.ari_path} root=${ROOT} ro ${pxe_options.pxe_append_params}
+append initrd=${pxe_options.ari_path} root=${ROOT} ro ${pxe_options.pxe_append_params} ${pxe_options.pxe_network_config}
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
index 64335298c..36127bfa2 100755
--- a/nova/virt/baremetal/tilera.py
+++ b/nova/virt/baremetal/tilera.py
@@ -55,31 +55,7 @@ def _get_cheetah():
def build_network_config(network_info):
- try:
- assert isinstance(network_info, list)
- except AssertionError:
- network_info = [network_info]
- interfaces = []
- for id, (network, mapping) in enumerate(network_info):
- address_v6 = None
- gateway_v6 = None
- netmask_v6 = None
- if CONF.use_ipv6:
- address_v6 = mapping['ip6s'][0]['ip']
- netmask_v6 = mapping['ip6s'][0]['netmask']
- gateway_v6 = mapping['gateway_v6']
- interface = {
- 'name': 'eth%d' % id,
- 'address': mapping['ips'][0]['ip'],
- 'gateway': mapping['gateway'],
- 'netmask': mapping['ips'][0]['netmask'],
- 'dns': ' '.join(mapping['dns']),
- 'address_v6': address_v6,
- 'gateway_v6': gateway_v6,
- 'netmask_v6': netmask_v6,
- }
- interfaces.append(interface)
-
+ interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
cheetah = _get_cheetah()
network_config = str(cheetah(
open(CONF.baremetal.net_config_template).read(),
@@ -130,7 +106,7 @@ def get_tftp_image_info(instance):
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
- except KeyError as e:
+ except KeyError:
pass
missing_labels = []
@@ -262,7 +238,7 @@ class Tilera(base.NodeDriver):
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.rmtree_without_raise(get_image_dir_path(instance))
- def activate_bootloader(self, context, node, instance):
+ def activate_bootloader(self, context, node, instance, network_info):
"""Configure Tilera boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
@@ -371,7 +347,7 @@ class Tilera(base.NodeDriver):
user_data = instance['user_data']
try:
self._iptables_set(node_ip, user_data)
- except Exception as ex:
+ except Exception:
self.deactivate_bootloader(context, node, instance)
            raise exception.NovaException(_("Node is in an "
                                            "unknown error state."))
diff --git a/nova/virt/baremetal/tilera_pdu.py b/nova/virt/baremetal/tilera_pdu.py
index 0e491168f..90f9287e4 100755
--- a/nova/virt/baremetal/tilera_pdu.py
+++ b/nova/virt/baremetal/tilera_pdu.py
@@ -109,7 +109,7 @@ class Pdu(base.PowerManager):
return CONF.baremetal.tile_pdu_off
else:
try:
- out = utils.execute(CONF.baremetal.tile_pdu_mgr,
+ utils.execute(CONF.baremetal.tile_pdu_mgr,
CONF.baremetal.tile_pdu_ip, mode)
time.sleep(CONF.baremetal.tile_power_wait)
return mode
diff --git a/nova/virt/baremetal/utils.py b/nova/virt/baremetal/utils.py
index b18bfac85..96abcd41b 100644
--- a/nova/virt/baremetal/utils.py
+++ b/nova/virt/baremetal/utils.py
@@ -81,3 +81,32 @@ def random_alnum(count):
import string
chars = string.ascii_uppercase + string.digits
return "".join(random.choice(chars) for _ in range(count))
+
+
+def map_network_interfaces(network_info, use_ipv6=False):
+ # TODO(deva): fix assumption that device names begin with "eth"
+ # and fix assumption about ordering
+ if not isinstance(network_info, list):
+ network_info = [network_info]
+
+ interfaces = []
+ for id, (network, mapping) in enumerate(network_info):
+ address_v6 = None
+ gateway_v6 = None
+ netmask_v6 = None
+ if use_ipv6:
+ address_v6 = mapping['ip6s'][0]['ip']
+ netmask_v6 = mapping['ip6s'][0]['netmask']
+ gateway_v6 = mapping['gateway_v6']
+ interface = {
+ 'name': 'eth%d' % id,
+ 'address': mapping['ips'][0]['ip'],
+ 'gateway': mapping['gateway'],
+ 'netmask': mapping['ips'][0]['netmask'],
+ 'dns': ' '.join(mapping['dns']),
+ 'address_v6': address_v6,
+ 'gateway_v6': gateway_v6,
+ 'netmask_v6': netmask_v6,
+ }
+ interfaces.append(interface)
+ return interfaces
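Note: a usage sketch for the new helper, with a made-up legacy network_info entry (a (network, mapping) pair; the network element itself is not consulted):

    from nova.virt.baremetal import utils as bm_utils

    network_info = [
        (None,
         {'ips': [{'ip': '10.0.0.2', 'netmask': '255.255.255.0'}],
          'gateway': '10.0.0.1',
          'dns': ['8.8.8.8', '8.8.4.4']}),
    ]
    interfaces = bm_utils.map_network_interfaces(network_info, use_ipv6=False)
    # [{'name': 'eth0', 'address': '10.0.0.2', 'gateway': '10.0.0.1',
    #   'netmask': '255.255.255.0', 'dns': '8.8.8.8 8.8.4.4',
    #   'address_v6': None, 'gateway_v6': None, 'netmask_v6': None}]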
diff --git a/nova/virt/baremetal/virtual_power_driver.py b/nova/virt/baremetal/virtual_power_driver.py
index e110bf436..303e5a009 100644
--- a/nova/virt/baremetal/virtual_power_driver.py
+++ b/nova/virt/baremetal/virtual_power_driver.py
@@ -59,7 +59,6 @@ CONF.register_group(baremetal_vp)
CONF.register_opts(opts, baremetal_vp)
_conn = None
-_virtual_power_settings = None
_vp_cmd = None
_cmds = None
@@ -84,7 +83,6 @@ class VirtualPowerManager(base.PowerManager):
"""
def __init__(self, **kwargs):
global _conn
- global _virtual_power_settings
global _cmds
if _cmds is None:
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index 6cf6b775f..f634fa76a 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -21,6 +21,7 @@ import re
from oslo.config import cfg
from nova import context as nova_context
+from nova.db import api as nova_db_api
from nova import exception
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -219,10 +220,9 @@ class LibvirtVolumeDriver(VolumeDriver):
return method(connection_info, *args, **kwargs)
def attach_volume(self, connection_info, instance, mountpoint):
- node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
ctx = nova_context.get_admin_context()
- pxe_ip = bmdb.bm_pxe_ip_get_by_bm_node_id(ctx, node['id'])
- if not pxe_ip:
+ fixed_ips = nova_db_api.fixed_ip_get_by_instance(ctx, instance['uuid'])
+ if not fixed_ips:
if not CONF.baremetal.use_unsafe_iscsi:
raise exception.NovaException(_(
'No fixed PXE IP is associated to %s') % instance['uuid'])
@@ -236,8 +236,9 @@ class LibvirtVolumeDriver(VolumeDriver):
tid = _get_next_tid()
_create_iscsi_export_tgtadm(device_path, tid, iqn)
- if pxe_ip:
- _allow_iscsi_tgtadm(tid, pxe_ip['address'])
+ if fixed_ips:
+ for ip in fixed_ips:
+ _allow_iscsi_tgtadm(tid, ip['address'])
else:
# NOTE(NTTdocomo): Since nova-compute does not know the
# instance's initiator ip, it allows any initiators
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 173dd457b..d4038457f 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -179,7 +179,3 @@ class ConfigDriveBuilder(object):
def required_by(instance):
return instance.get('config_drive') or CONF.force_config_drive
-
-
-def enabled_for(instance):
- return required_by(instance) or instance.get('config_drive_id')
diff --git a/nova/virt/disk/vfs/localfs.py b/nova/virt/disk/vfs/localfs.py
index 10b9a1aa8..735481340 100644
--- a/nova/virt/disk/vfs/localfs.py
+++ b/nova/virt/disk/vfs/localfs.py
@@ -18,6 +18,7 @@ import os
import tempfile
from nova import exception
+from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk.mount import loop
@@ -77,10 +78,9 @@ class VFSLocalFS(vfs.VFS):
raise exception.NovaException(mount.error)
self.mount = mount
except Exception as e:
- LOG.debug(_("Failed to mount image %(ex)s)") %
- {'ex': str(e)})
- self.teardown()
- raise e
+ with excutils.save_and_reraise_exception():
+ LOG.debug(_("Failed to mount image %(ex)s)"), {'ex': str(e)})
+ self.teardown()
def teardown(self):
try:
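Note: excutils.save_and_reraise_exception() runs the with-block body (here, the debug log and teardown) and then re-raises the original exception with its traceback intact. A simplified sketch of the idea, not the actual oslo implementation (Python 2 raise syntax, matching Nova at the time):

    import sys

    class save_and_reraise_exception(object):
        def __enter__(self):
            # Capture the exception currently being handled.
            self.type_, self.value, self.tb = sys.exc_info()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is None:
                # Body succeeded: re-raise the saved exception unchanged.
                raise self.type_, self.value, self.tb
            # Body raised a new exception: let that one propagate instead.
            return False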
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index fbc2a5640..7b2185f52 100755
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -425,7 +425,8 @@ class ComputeDriver(object):
"""Power off the specified instance."""
raise NotImplementedError()
- def power_on(self, instance):
+ def power_on(self, context, instance, network_info,
+ block_device_info=None):
"""Power on the specified instance."""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 801c8e827..1d8943101 100755
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -182,7 +182,7 @@ class FakeDriver(driver.ComputeDriver):
def power_off(self, instance):
pass
- def power_on(self, instance):
+ def power_on(self, context, instance, network_info, block_device_info):
pass
def soft_delete(self, instance):
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 675f36f54..e0f533db6 100755
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -103,7 +103,8 @@ class HyperVDriver(driver.ComputeDriver):
def power_off(self, instance):
self._vmops.power_off(instance)
- def power_on(self, instance):
+ def power_on(self, context, instance, network_info,
+ block_device_info=None):
self._vmops.power_on(instance)
def live_migration(self, context, instance_ref, dest, post_method,
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 680ec2d61..2cc40a1de 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -335,7 +335,7 @@ class VMUtils(object):
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
- vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ self._conn.Msvm_VirtualSystemManagementService()
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
diff --git a/nova/virt/images.py b/nova/virt/images.py
index b752b7a64..e1b42eafc 100755
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -30,6 +30,7 @@ from nova import exception
from nova.image import glance
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
+from nova.openstack.common import strutils
from nova import utils
LOG = logging.getLogger(__name__)
@@ -90,8 +91,8 @@ class QemuImgInfo(object):
if real_size:
details = real_size.group(1)
try:
- details = utils.to_bytes(details)
- except (TypeError, ValueError):
+ details = strutils.to_bytes(details)
+ except TypeError:
pass
return details
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index aabcef964..55bf4fcd1 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -411,7 +411,7 @@ def get_disk_mapping(virt_type, instance,
'dev': disk_dev,
'type': 'disk'}
- if configdrive.enabled_for(instance):
+ if configdrive.required_by(instance):
config_info = get_next_disk_info(mapping,
disk_bus,
last_device=True)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 6cfa2c7c7..f20a27900 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -291,7 +291,7 @@ MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
-def libvirt_error_handler(ctxt, err):
+def libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
@@ -351,8 +351,9 @@ class LibvirtDriver(driver.ComputeDriver):
for mode_str in CONF.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
- LOG.warn(_("Invalid cachemode %(cache_mode)s specified "
- "for disk type %(disk_type)s.") % locals())
+ LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
+ 'for disk type %(disk_type)s.'),
+ {'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
@@ -554,19 +555,19 @@ class LibvirtDriver(driver.ComputeDriver):
event_thread.start()
LOG.debug("Starting green dispatch thread")
- dispatch_thread = eventlet.spawn(self._dispatch_thread)
+ eventlet.spawn(self._dispatch_thread)
def init_host(self, host):
- libvirt.registerErrorHandler(libvirt_error_handler, None)
- libvirt.virEventRegisterDefaultImpl()
-
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
- '%(major)i.%(minor)i.%(micro)i or greater.') %
- locals())
+ '%(major)i.%(minor)i.%(micro)i or greater.'),
+ {'major': major, 'minor': minor, 'micro': micro})
+
+ libvirt.registerErrorHandler(libvirt_error_handler, None)
+ libvirt.virEventRegisterDefaultImpl()
self._init_events()
@@ -592,7 +593,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
- except Exception as e:
+ except Exception:
LOG.warn(_("URI %s does not support events"),
self.uri())
@@ -778,9 +779,10 @@ class LibvirtDriver(driver.ComputeDriver):
if not is_okay:
with excutils.save_and_reraise_exception():
- LOG.error(_("Error from libvirt during destroy. "
- "Code=%(errcode)s Error=%(e)s") %
- locals(), instance=instance)
+ LOG.error(_('Error from libvirt during destroy. '
+ 'Code=%(errcode)s Error=%(e)s'),
+ {'errcode': errcode, 'e': e},
+ instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
@@ -853,9 +855,9 @@ class LibvirtDriver(driver.ComputeDriver):
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
- LOG.error(_("Error from libvirt during undefine. "
- "Code=%(errcode)s Error=%(e)s") %
- locals(), instance=instance)
+ LOG.error(_('Error from libvirt during undefine. '
+ 'Code=%(errcode)s Error=%(e)s') %
+ {'errcode': errcode, 'e': e}, instance=instance)
def _cleanup(self, instance, network_info, block_device_info,
destroy_disks):
@@ -879,9 +881,10 @@ class LibvirtDriver(driver.ComputeDriver):
else:
retry = False
errcode = e.get_error_code()
- LOG.error(_("Error from libvirt during unfilter. "
- "Code=%(errcode)s Error=%(e)s") %
- locals(), instance=instance)
+ LOG.error(_('Error from libvirt during unfilter. '
+ 'Code=%(errcode)s Error=%(e)s') %
+ {'errcode': errcode, 'e': e},
+ instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
@@ -907,7 +910,7 @@ class LibvirtDriver(driver.ComputeDriver):
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
- LOG.info(_('Deleting instance files %(target)s') % locals(),
+ LOG.info(_('Deleting instance files %s'), target,
instance=instance)
if os.path.exists(target):
# If we fail to get rid of the directory
@@ -916,8 +919,8 @@ class LibvirtDriver(driver.ComputeDriver):
try:
shutil.rmtree(target)
except OSError as e:
- LOG.error(_("Failed to cleanup directory %(target)s: %(e)s"
- ) % locals())
+ LOG.error(_('Failed to cleanup directory %(target)s: '
+ '%(e)s'), {'target': target, 'e': e})
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
@@ -1113,7 +1116,7 @@ class LibvirtDriver(driver.ComputeDriver):
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
- except libvirt.libvirtError as ex:
+ except libvirt.libvirtError:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, (network, mapping))
@@ -1205,6 +1208,18 @@ class LibvirtDriver(driver.ComputeDriver):
REQ_HYPERVISOR_LIVESNAPSHOT) \
and not source_format == "lvm":
live_snapshot = True
+ # Abort is an idempotent operation, so make sure any block
+ # jobs which may have failed are ended. This operation also
+            # confirms that the running instance, as opposed to the system as a
+ # whole, has a new enough version of the hypervisor (bug 1193146).
+ try:
+ virt_dom.blockJobAbort(disk_path, 0)
+ except libvirt.libvirtError as ex:
+ error_code = ex.get_error_code()
+ if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
+ live_snapshot = False
+ else:
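+                    # Any other abort failure is ignored: the call is an
+                    # idempotent probe, so an error here generally means no
+                    # block job was active on the disk.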
+ pass
else:
live_snapshot = False
@@ -1276,13 +1291,6 @@ class LibvirtDriver(driver.ComputeDriver):
# Save a copy of the domain's running XML file
xml = domain.XMLDesc(0)
- # Abort is an idempotent operation, so make sure any block
- # jobs which may have failed are ended.
- try:
- domain.blockJobAbort(disk_path, 0)
- except Exception:
- pass
-
def _wait_for_block_job(domain, disk_path):
status = domain.blockJobInfo(disk_path, 0)
try:
@@ -1378,19 +1386,22 @@ class LibvirtDriver(driver.ComputeDriver):
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
- if state in [power_state.SHUTDOWN,
- power_state.CRASHED]:
- LOG.info(_("Instance shutdown successfully."),
- instance=instance)
- self._create_domain(domain=dom)
- timer = loopingcall.FixedIntervalLoopingCall(
- self._wait_for_running, instance)
- timer.start(interval=0.5).wait()
- return True
- elif old_domid != new_domid:
- LOG.info(_("Instance may have been rebooted during soft "
- "reboot, so return now."), instance=instance)
- return True
+ # NOTE(ivoks): By checking domain IDs, we make sure we are
+            # not recreating a domain that's already running.
+ if old_domid != new_domid:
+ if state in [power_state.SHUTDOWN,
+ power_state.CRASHED]:
+ LOG.info(_("Instance shutdown successfully."),
+ instance=instance)
+ self._create_domain(domain=dom)
+ timer = loopingcall.FixedIntervalLoopingCall(
+ self._wait_for_running, instance)
+ timer.start(interval=0.5).wait()
+ return True
+ else:
+ LOG.info(_("Instance may have been rebooted during soft "
+ "reboot, so return now."), instance=instance)
+ return True
greenthread.sleep(1)
return False
@@ -1423,7 +1434,8 @@ class LibvirtDriver(driver.ComputeDriver):
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
- disk_info_json = self.get_instance_disk_info(instance['name'], xml)
+ disk_info_json = self.get_instance_disk_info(instance['name'], xml,
+ block_device_info)
self._create_images_and_backing(context, instance, disk_info_json)
# Initialize all the necessary networking, block devices and
@@ -1457,13 +1469,13 @@ class LibvirtDriver(driver.ComputeDriver):
"""Power off the specified instance."""
self._destroy(instance)
- def power_on(self, instance):
+ def power_on(self, context, instance, network_info,
+ block_device_info=None):
"""Power on the specified instance."""
- dom = self._lookup_by_name(instance['name'])
- self._create_domain(domain=dom, instance=instance)
- timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_running,
- instance)
- timer.start(interval=0.5).wait()
+ # We use _hard_reboot here to ensure that all backing files,
+ # network, and block device connections, etc. are established
+ # and available before we attempt to start the instance.
+ self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, instance):
"""Suspend the specified instance."""
@@ -1605,7 +1617,8 @@ class LibvirtDriver(driver.ComputeDriver):
return out
def _append_to_file(self, data, fpath):
- LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
+ LOG.info(_('data: %(data)r, fpath: %(fpath)r'),
+ {'data': data, 'fpath': fpath})
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -1918,7 +1931,7 @@ class LibvirtDriver(driver.ComputeDriver):
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
- content=files, extra_md=extra_md)
+ content=files or [], extra_md=extra_md)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
@@ -1959,10 +1972,16 @@ class LibvirtDriver(driver.ComputeDriver):
injection_path = image('disk').path
img_id = instance['image_ref']
- for inj in ('key', 'net', 'metadata', 'admin_pass', 'files'):
- if locals()[inj]:
+ for inj, val in [('key', key),
+ ('net', net),
+ ('metadata', metadata),
+ ('admin_pass', admin_pass),
+ ('files', files)]:
+ if val:
LOG.info(_('Injecting %(inj)s into image '
- '%(img_id)s'), locals(), instance=instance)
+ '%(img_id)s'),
+ {'inj': inj, 'img_id': img_id},
+ instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
@@ -1972,7 +1991,8 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Error injecting data into image '
- '%(img_id)s (%(e)s)') % locals(),
+ '%(img_id)s (%(e)s)'),
+ {'img_id': img_id, 'e': e},
instance=instance)
if CONF.libvirt_type == 'uml':
@@ -2384,12 +2404,14 @@ class LibvirtDriver(driver.ComputeDriver):
def to_xml(self, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
- LOG.debug(_("Start to_xml instance=%(instance)s "
- "network_info=%(network_info)s "
- "disk_info=%(disk_info)s "
- "image_meta=%(image_meta)s rescue=%(rescue)s"
- "block_device_info=%(block_device_info)s") %
- locals())
+ LOG.debug(_('Start to_xml instance=%(instance)s '
+ 'network_info=%(network_info)s '
+ 'disk_info=%(disk_info)s '
+                    'image_meta=%(image_meta)s rescue=%(rescue)s '
+ 'block_device_info=%(block_device_info)s'),
+ {'instance': instance, 'network_info': network_info,
+ 'disk_info': disk_info, 'image_meta': image_meta,
+ 'rescue': rescue, 'block_device_info': block_device_info})
conf = self.get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
xml = conf.to_xml()
@@ -2399,7 +2421,8 @@ class LibvirtDriver(driver.ComputeDriver):
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
- LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s') % locals())
+ LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s'),
+ {'instance': instance, 'xml': xml})
return xml
def _lookup_by_id(self, instance_id):
@@ -2437,8 +2460,11 @@ class LibvirtDriver(driver.ComputeDriver):
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
- msg = _("Error from libvirt while looking up %(instance_name)s: "
- "[Error Code %(error_code)s] %(ex)s") % locals()
+ msg = (_('Error from libvirt while looking up %(instance_name)s: '
+ '[Error Code %(error_code)s] %(ex)s') %
+ {'instance_name': instance_name,
+ 'error_code': error_code,
+ 'ex': ex})
raise exception.NovaException(msg)
def get_info(self, instance):
@@ -2481,10 +2507,28 @@ class LibvirtDriver(driver.ComputeDriver):
use_cow=CONF.use_cow_images)
if xml:
- domain = self._conn.defineXML(xml)
+ try:
+ domain = self._conn.defineXML(xml)
+ except Exception as e:
+ LOG.error(_("An error occurred while trying to define a domain"
+ " with xml: %s") % xml)
+                raise
+
if power_on:
- domain.createWithFlags(launch_flags)
- self._enable_hairpin(domain.XMLDesc(0))
+ try:
+ domain.createWithFlags(launch_flags)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("An error occurred while trying to launch a "
+ "defined domain with xml: %s") %
+ domain.XMLDesc(0))
+
+ try:
+ self._enable_hairpin(domain.XMLDesc(0))
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("An error occurred while enabling hairpin mode on "
+ "domain with xml: %s") % domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
@@ -2911,13 +2955,14 @@ class LibvirtDriver(driver.ComputeDriver):
return domain.blockStats(disk)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
- LOG.info(_("Getting block stats failed, device might have "
- "been detached. Instance=%(instance_name)s "
- "Disk=%(disk)s Code=%(errcode)s Error=%(e)s")
- % locals())
+ LOG.info(_('Getting block stats failed, device might have '
+ 'been detached. Instance=%(instance_name)s '
+ 'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
+ {'instance_name': instance_name, 'disk': disk,
+ 'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
- LOG.info(_("Could not find domain in libvirt for instance %s. "
- "Cannot get block stats for device") % instance_name)
+ LOG.info(_('Could not find domain in libvirt for instance %s. '
+ 'Cannot get block stats for device'), instance_name)
def interface_stats(self, instance_name, interface):
"""
@@ -2986,7 +3031,7 @@ class LibvirtDriver(driver.ComputeDriver):
'disk_available_least': _get_disk_available_least()}
return dic
- def check_instance_shared_storage_local(self, ctxt, instance):
+ def check_instance_shared_storage_local(self, context, instance):
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
@@ -2999,13 +3044,13 @@ class LibvirtDriver(driver.ComputeDriver):
os.close(fd)
return {"filename": tmp_file}
- def check_instance_shared_storage_remote(self, ctxt, data):
+ def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
- def check_instance_shared_storage_cleanup(self, ctxt, data):
+ def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
- def check_can_live_migrate_destination(self, ctxt, instance_ref,
+ def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
@@ -3014,8 +3059,8 @@ class LibvirtDriver(driver.ComputeDriver):
This runs checks on the destination host, and then calls
back to the source host to check the results.
- :param ctxt: security context
- :param instance_ref: nova.db.sqlalchemy.models.Instance
+ :param context: security context
+ :param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
@@ -3031,7 +3076,6 @@ class LibvirtDriver(driver.ComputeDriver):
(disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
- src = instance_ref['host']
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
@@ -3043,16 +3087,17 @@ class LibvirtDriver(driver.ComputeDriver):
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
- def check_can_live_migrate_destination_cleanup(self, ctxt,
+ def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
- :param ctxt: security context
+ :param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
- def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
+ def check_can_live_migrate_source(self, context, instance,
+ dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
@@ -3077,7 +3122,7 @@ class LibvirtDriver(driver.ComputeDriver):
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
- self._assert_dest_node_has_enough_disk(ctxt, instance,
+ self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
@@ -3096,7 +3141,7 @@ class LibvirtDriver(driver.ComputeDriver):
return dest_check_data
- def _assert_dest_node_has_enough_disk(self, context, instance_ref,
+ def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
# Libvirt supports qcow2 disk format,which is usually compressed
@@ -3113,7 +3158,7 @@ class LibvirtDriver(driver.ComputeDriver):
if available_mb:
available = available_mb * (1024 ** 2)
- ret = self.get_instance_disk_info(instance_ref['name'])
+ ret = self.get_instance_disk_info(instance['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
@@ -3126,12 +3171,14 @@ class LibvirtDriver(driver.ComputeDriver):
# Check that available disk > necessary disk
if (available - necessary) < 0:
- instance_uuid = instance_ref['uuid']
- reason = _("Unable to migrate %(instance_uuid)s: "
- "Disk of instance is too large(available"
- " on destination host:%(available)s "
- "< need:%(necessary)s)")
- raise exception.MigrationPreCheckError(reason=reason % locals())
+ reason = (_('Unable to migrate %(instance_uuid)s: '
+                        'Disk of instance is too large (available'
+                        ' on destination host: %(available)s '
+                        '< need: %(necessary)s)') %
+ {'instance_uuid': instance['uuid'],
+ 'available': available,
+ 'necessary': necessary})
+ raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, cpu_info):
"""Checks the host cpu is compatible to a cpu given by xml.
@@ -3171,11 +3218,11 @@ class LibvirtDriver(driver.ComputeDriver):
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
ret = e.message
- LOG.error(m % locals())
+ LOG.error(m, {'ret': ret, 'u': u})
if ret <= 0:
- LOG.error(m % locals())
- raise exception.InvalidCPUInfo(reason=m % locals())
+ LOG.error(m, {'ret': ret, 'u': u})
+ raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
@@ -3203,7 +3250,7 @@ class LibvirtDriver(driver.ComputeDriver):
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
- def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
+ def ensure_filtering_rules_for_instance(self, instance, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
@@ -3218,21 +3265,21 @@ class LibvirtDriver(driver.ComputeDriver):
if not time_module:
time_module = greenthread
- self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
- self.firewall_driver.prepare_instance_filter(instance_ref,
+ self.firewall_driver.setup_basic_filtering(instance, network_info)
+ self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
- if self.firewall_driver.instance_filter_exists(instance_ref,
+ if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
- raise exception.NovaException(msg % instance_ref["name"])
+ raise exception.NovaException(msg % instance["name"])
time_module.sleep(1)
def filter_defer_apply_on(self):
@@ -3241,13 +3288,13 @@ class LibvirtDriver(driver.ComputeDriver):
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
- def live_migration(self, ctxt, instance_ref, dest,
+ def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
- :params ctxt: security context
- :params instance_ref:
+ :params context: security context
+ :params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
@@ -3263,17 +3310,17 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
+ greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
- def _live_migration(self, ctxt, instance_ref, dest, post_method,
+ def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
"""Do live migration.
- :params ctxt: security context
- :params instance_ref:
+ :params context: security context
+ :params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
@@ -3295,7 +3342,7 @@ class LibvirtDriver(driver.ComputeDriver):
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
- dom = self._lookup_by_name(instance_ref["name"])
+ dom = self._lookup_by_name(instance["name"])
dom.migrateToURI(CONF.live_migration_uri % dest,
logical_sum,
None,
@@ -3303,9 +3350,9 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception as e:
with excutils.save_and_reraise_exception():
- LOG.error(_("Live Migration failure: %(e)s") % locals(),
- instance=instance_ref)
- recover_method(ctxt, instance_ref, dest, block_migration)
+ LOG.error(_("Live Migration failure: %s"), e,
+ instance=instance)
+ recover_method(context, instance, dest, block_migration)
# Waiting for completion of live_migration.
timer = loopingcall.FixedIntervalLoopingCall(f=None)
@@ -3313,10 +3360,10 @@ class LibvirtDriver(driver.ComputeDriver):
def wait_for_live_migration():
"""waiting for live migration completion."""
try:
- self.get_info(instance_ref)['state']
+ self.get_info(instance)['state']
except exception.NotFound:
timer.stop()
- post_method(ctxt, instance_ref, dest, block_migration,
+ post_method(context, instance, dest, block_migration,
migrate_data)
timer.f = wait_for_live_migration
@@ -3412,7 +3459,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
greenthread.sleep(1)
- def pre_block_migration(self, ctxt, instance, disk_info_json):
+ def pre_block_migration(self, context, instance, disk_info_json):
"""Preparation for block migration."""
# NOTE (rmk): When preparing for a block migration, the instance dir
# should not exist on the destination hypervisor.
@@ -3420,11 +3467,11 @@ class LibvirtDriver(driver.ComputeDriver):
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
- self._create_images_and_backing(ctxt, instance, disk_info_json)
+ self._create_images_and_backing(context, instance, disk_info_json)
- def _create_images_and_backing(self, ctxt, instance, disk_info_json):
+ def _create_images_and_backing(self, context, instance, disk_info_json):
"""
- :params ctxt: security context
+ :params context: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
@@ -3453,7 +3500,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance_disk,
CONF.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
- context=ctxt,
+ context=context,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
@@ -3462,17 +3509,17 @@ class LibvirtDriver(driver.ComputeDriver):
# if image has kernel and ramdisk, just download
# following normal way.
- self._fetch_instance_kernel_ramdisk(ctxt, instance)
+ self._fetch_instance_kernel_ramdisk(context, instance)
- def post_live_migration_at_destination(self, ctxt,
- instance_ref,
+ def post_live_migration_at_destination(self, context,
+ instance,
network_info,
block_migration,
block_device_info=None):
"""Post operation of live migration at destination host.
- :param ctxt: security context
- :param instance_ref:
+ :param context: security context
+ :param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
@@ -3480,23 +3527,24 @@ class LibvirtDriver(driver.ComputeDriver):
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
- if instance_ref["name"] not in dom_list:
+ if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
- instance_ref)
- self.to_xml(instance_ref, network_info, disk_info,
+ instance)
+ self.to_xml(instance, network_info, disk_info,
block_device_info, write_to_disk=True)
# libvirt.xml should be made by to_xml(), but libvirt
# does not accept to_xml() result, since uuid is not
# included in to_xml() result.
- dom = self._lookup_by_name(instance_ref["name"])
+ dom = self._lookup_by_name(instance["name"])
self._conn.defineXML(dom.XMLDesc(0))
- def get_instance_disk_info(self, instance_name, xml=None):
+ def get_instance_disk_info(self, instance_name, xml=None,
+ block_device_info=None):
"""Preparation block migration.
- :params instance_ref:
+ :params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:return:
@@ -3517,31 +3565,49 @@ class LibvirtDriver(driver.ComputeDriver):
xml = virt_dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
- msg = _("Error from libvirt while getting description of "
- "%(instance_name)s: [Error Code %(error_code)s] "
- "%(ex)s") % locals()
+ msg = (_('Error from libvirt while getting description of '
+ '%(instance_name)s: [Error Code %(error_code)s] '
+ '%(ex)s') %
+ {'instance_name': instance_name,
+ 'error_code': error_code,
+ 'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
+ # NOTE (rmk): When block_device_info is provided, we will use it to
+ # filter out devices which are actually volumes.
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
+
+ volume_devices = set()
+ for vol in block_device_mapping:
+ disk_dev = vol['mount_device'].rpartition("/")[2]
+ volume_devices.add(disk_dev)
+
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
+ target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
+ target = target_nodes[cnt].attrib['dev']
if disk_type != 'file':
- LOG.debug(_('skipping %(path)s since it looks like volume') %
- locals())
+ LOG.debug(_('skipping %s since it looks like volume'), path)
continue
if not path:
- LOG.debug(_('skipping disk for %(instance_name)s as it'
- ' does not have a path') %
- locals())
+ LOG.debug(_('skipping disk for %s as it does not have a path'),
+ instance_name)
+ continue
+
+ if target in volume_devices:
+ LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
+ 'volume'), {'path': path, 'target': target})
continue
# get the real disk size or
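Note: the volume filter above matches libvirt's <target dev=.../> attribute against device names derived from the block device mapping (vol['mount_device'].rpartition('/')[2], e.g. '/dev/vdb' -> 'vdb'). A standalone sketch of the same idea over hand-written XML, using lxml's etree as the driver does:

    from lxml import etree

    xml = ("<domain><devices>"
           "<disk type='file'>"
           "<source file='/var/lib/nova/instances/uuid/disk'/>"
           "<target dev='vda'/></disk>"
           "<disk type='block'>"
           "<source dev='/dev/mapper/volume-1234'/>"
           "<target dev='vdb'/></disk>"
           "</devices></domain>")
    volume_devices = set(['vdb'])   # would come from block_device_mapping

    doc = etree.fromstring(xml)
    for disk in doc.findall('.//devices/disk'):
        target = disk.find('target').attrib['dev']
        if target in volume_devices:
            continue   # a volume, not one of the instance's local disks
        print(disk.find('source').get('file'))   # prints .../uuid/disk only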
@@ -3580,8 +3646,8 @@ class LibvirtDriver(driver.ComputeDriver):
info['over_committed_disk_size'])
except OSError as e:
if e.errno == errno.ENOENT:
- LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
- locals())
+ LOG.error(_('Getting disk size of %(i_name)s: %(e)s'),
+ {'i_name': i_name, 'e': e})
else:
raise
except exception.InstanceNotFound:
@@ -3591,9 +3657,9 @@ class LibvirtDriver(driver.ComputeDriver):
greenthread.sleep(0)
return disk_over_committed_size
- def unfilter_instance(self, instance_ref, network_info):
+ def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
- self.firewall_driver.unfilter_instance(instance_ref,
+ self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_host_stats(self, refresh=False):
@@ -3652,7 +3718,8 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
- disk_info_text = self.get_instance_disk_info(instance['name'])
+ disk_info_text = self.get_instance_disk_info(instance['name'],
+ block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
# copy disks to destination
@@ -3912,9 +3979,7 @@ class LibvirtDriver(driver.ComputeDriver):
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
- LOG.debug(_('Checking instance files accessability'
- '%(instance_path)s')
- % locals())
+        LOG.debug(_('Checking instance files accessibility %s'), instance_path)
return os.access(instance_path, os.W_OK)
def inject_network_info(self, instance, nw_info):
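Note on the recurring logging change throughout this file: passing parameters as a separate dict, instead of %-interpolating (often via locals()) at the call site, avoids fragile locals() lookups and defers string formatting until a handler actually emits the record. A minimal illustration:

    import logging

    LOG = logging.getLogger(__name__)
    errcode, e = 38, 'domain is not running'

    # Eager: the message is built even if ERROR records are filtered out.
    LOG.error('Code=%(errcode)s Error=%(e)s' % {'errcode': errcode, 'e': e})

    # Lazy: the logging module interpolates only when the record is handled.
    LOG.error('Code=%(errcode)s Error=%(e)s', {'errcode': errcode, 'e': e})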
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index f55e29df9..f9e948a5e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -245,9 +245,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
# This happens when the instance filter is still in
# use (ie. when the instance has not terminated properly)
raise
- LOG.debug(_('The nwfilter(%(instance_filter_name)s) '
- 'is not found.') % locals(),
- instance=instance)
+ LOG.debug(_('The nwfilter(%s) is not found.'),
+ instance_filter_name, instance=instance)
def _define_filters(self, filter_name, filter_children):
self._define_filter(self._filter_container(filter_name,
@@ -269,7 +268,9 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
except libvirt.libvirtError:
name = instance['name']
LOG.debug(_('The nwfilter(%(instance_filter_name)s) for'
- '%(name)s is not found.') % locals(),
+                        ' %(name)s is not found.'),
+ {'instance_filter_name': instance_filter_name,
+ 'name': name},
instance=instance)
return False
return True
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index b8b9df1a9..9b33a12fe 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -228,6 +228,36 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
mapping,
image_meta)
+ def get_config_ivs_hybrid(self, instance, network, mapping, image_meta):
+ newnet = copy.deepcopy(network)
+ newnet['bridge'] = self.get_br_name(mapping['vif_uuid'])
+ return self.get_config_bridge(instance,
+ newnet,
+ mapping,
+ image_meta)
+
+ def get_config_ivs_ethernet(self, instance, network, mapping, image_meta):
+ conf = super(LibvirtGenericVIFDriver,
+ self).get_config(instance,
+ network,
+ mapping,
+ image_meta)
+
+ dev = self.get_vif_devname(mapping)
+ designer.set_vif_host_backend_ethernet_config(conf, dev)
+
+ return conf
+
+ def get_config_ivs(self, instance, network, mapping, image_meta):
+ if self.get_firewall_required():
+ return self.get_config_ivs_hybrid(instance, network,
+ mapping,
+ image_meta)
+ else:
+ return self.get_config_ivs_ethernet(instance, network,
+ mapping,
+ image_meta)
+
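The two branches above yield different libvirt interface types. A rough sketch of what each path renders to, assuming libvirt's standard bridge and ethernet interface elements (device names are hypothetical):

    # hybrid: libvirt attaches to the per-VIF linux bridge
    hybrid_xml = """<interface type='bridge'>
      <source bridge='qbr3adf0d08-e2'/>
      <target dev='tap3adf0d08-e2'/>
    </interface>"""

    # ethernet: libvirt creates a bare tap device for IVS to adopt
    ethernet_xml = """<interface type='ethernet'>
      <target dev='tap3adf0d08-e2'/>
    </interface>"""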
def get_config_802qbg(self, instance, network, mapping, image_meta):
conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
@@ -262,16 +292,16 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
def get_config(self, instance, network, mapping, image_meta):
vif_type = mapping.get('vif_type')
- LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
- "network=%(network)s mapping=%(mapping)s")
- % locals())
+ LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
+ 'network=%(network)s mapping=%(mapping)s'),
+ {'vif_type': vif_type, 'instance': instance,
+ 'network': network, 'mapping': mapping})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
-
- if vif_type == network_model.VIF_TYPE_BRIDGE:
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
return self.get_config_bridge(instance,
network, mapping,
image_meta)
@@ -287,6 +317,10 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
return self.get_config_802qbh(instance,
network, mapping,
image_meta)
+ elif vif_type == network_model.VIF_TYPE_IVS:
+ return self.get_config_ivs(instance,
+ network, mapping,
+ image_meta)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
@@ -371,6 +405,51 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
else:
self.plug_ovs_ethernet(instance, vif)
+ def plug_ivs_ethernet(self, instance, vif):
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
+
+ network, mapping = vif
+ iface_id = self.get_ovs_interfaceid(mapping)
+ dev = self.get_vif_devname(mapping)
+ linux_net.create_tap_dev(dev)
+ linux_net.create_ivs_vif_port(dev, iface_id, mapping['mac'],
+ instance['uuid'])
+
+ def plug_ivs_hybrid(self, instance, vif):
+ """Plug using hybrid strategy (same as OVS)
+
+        Create a per-VIF linux bridge, then link that bridge to IVS
+        via a veth device, setting up the other end of the veth device
+        just like a normal IVS port. Then boot the VIF on the linux
+        bridge using standard libvirt mechanisms.
+ """
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
+
+ network, mapping = vif
+ iface_id = self.get_ovs_interfaceid(mapping)
+ br_name = self.get_br_name(mapping['vif_uuid'])
+ v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid'])
+
+ if not linux_net.device_exists(br_name):
+ utils.execute('brctl', 'addbr', br_name, run_as_root=True)
+ utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
+ utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
+
+ if not linux_net.device_exists(v2_name):
+ linux_net._create_veth_pair(v1_name, v2_name)
+ utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
+ utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
+ linux_net.create_ivs_vif_port(v2_name, iface_id, mapping['mac'],
+ instance['uuid'])
+
+ def plug_ivs(self, instance, vif):
+ if self.get_firewall_required():
+ self.plug_ivs_hybrid(instance, vif)
+ else:
+ self.plug_ivs_ethernet(instance, vif)
+
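The hybrid plug path is easier to follow with the device naming spelled out. A minimal sketch, assuming the 'qbr'/'qvb'/'qvo' prefixes and the 14-character interface-name limit used by the OVS hybrid code; the authoritative logic lives in get_br_name and get_veth_pair_names:

    vif_uuid = '3adf0d08-e2ae-4fb7-a8ea-5a6cdbd94b7a'  # hypothetical
    br_name = ('qbr' + vif_uuid)[:14]  # per-VIF bridge libvirt attaches to
    v1_name = ('qvb' + vif_uuid)[:14]  # veth end enslaved to that bridge
    v2_name = ('qvo' + vif_uuid)[:14]  # veth end handed to IVS

    # resulting topology: guest tap -> qbr... -> (qvb... <-> qvo...) -> IVS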
def plug_802qbg(self, instance, vif):
super(LibvirtGenericVIFDriver,
self).plug(instance, vif)
@@ -383,16 +462,16 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
network, mapping = vif
vif_type = mapping.get('vif_type')
- LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
- "network=%(network)s mapping=%(mapping)s")
- % locals())
+ LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
+ 'network=%(network)s mapping=%(mapping)s'),
+ {'vif_type': vif_type, 'instance': instance,
+ 'network': network, 'mapping': mapping})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
-
- if vif_type == network_model.VIF_TYPE_BRIDGE:
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
self.plug_bridge(instance, vif)
elif vif_type == network_model.VIF_TYPE_OVS:
self.plug_ovs(instance, vif)
@@ -400,6 +479,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
self.plug_802qbg(instance, vif)
elif vif_type == network_model.VIF_TYPE_802_QBH:
self.plug_802qbh(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_IVS:
+ self.plug_ivs(instance, vif)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
@@ -458,6 +539,45 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
else:
self.unplug_ovs_ethernet(instance, vif)
+ def unplug_ivs_ethernet(self, instance, vif):
+ """Unplug the VIF by deleting the port from the bridge."""
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ try:
+ network, mapping = vif
+ linux_net.delete_ivs_vif_port(self.get_vif_devname(mapping))
+ except exception.ProcessExecutionError:
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
+
+ def unplug_ivs_hybrid(self, instance, vif):
+ """UnPlug using hybrid strategy (same as OVS)
+
+ Unhook port from IVS, unhook port from bridge, delete
+ bridge, and delete both veth devices.
+ """
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ try:
+ network, mapping = vif
+ br_name = self.get_br_name(mapping['vif_uuid'])
+ v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid'])
+
+ utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
+ utils.execute('ip', 'link', 'set', br_name, 'down',
+ run_as_root=True)
+ utils.execute('brctl', 'delbr', br_name, run_as_root=True)
+ linux_net.delete_ivs_vif_port(v2_name)
+ except exception.ProcessExecutionError:
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
+
+ def unplug_ivs(self, instance, vif):
+ if self.get_firewall_required():
+            self.unplug_ivs_hybrid(instance, vif)
+        else:
+            self.unplug_ivs_ethernet(instance, vif)
+
def unplug_802qbg(self, instance, vif):
super(LibvirtGenericVIFDriver,
self).unplug(instance, vif)
@@ -470,16 +590,16 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
network, mapping = vif
vif_type = mapping.get('vif_type')
- LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
- "network=%(network)s mapping=%(mapping)s")
- % locals())
+ LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
+ 'network=%(network)s mapping=%(mapping)s'),
+ {'vif_type': vif_type, 'instance': instance,
+ 'network': network, 'mapping': mapping})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
-
- if vif_type == network_model.VIF_TYPE_BRIDGE:
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
self.unplug_bridge(instance, vif)
elif vif_type == network_model.VIF_TYPE_OVS:
self.unplug_ovs(instance, vif)
@@ -487,6 +607,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
self.unplug_802qbg(instance, vif)
elif vif_type == network_model.VIF_TYPE_802_QBH:
self.unplug_802qbh(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_IVS:
+ self.unplug_ivs(instance, vif)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index c2cee276d..e49bbb209 100755
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -191,7 +191,8 @@ class PowerVMDriver(driver.ComputeDriver):
"""Power off the specified instance."""
self._powervm.power_off(instance['name'])
- def power_on(self, instance):
+ def power_on(self, context, instance, network_info,
+ block_device_info=None):
"""Power on the specified instance."""
self._powervm.power_on(instance['name'])
diff --git a/nova/virt/powervm/lpar.py b/nova/virt/powervm/lpar.py
index 7be8b046a..a6c782692 100644
--- a/nova/virt/powervm/lpar.py
+++ b/nova/virt/powervm/lpar.py
@@ -51,7 +51,7 @@ def load_from_conf_data(conf_data):
for (key, value) in attribs.items():
try:
lpar[key] = value
- except exception.PowerVMLPARAttributeNotFound as e:
+ except exception.PowerVMLPARAttributeNotFound:
LOG.info(_('Encountered unknown LPAR attribute: %s\n'
'Continuing without storing') % key)
return lpar
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 18cba0ba2..fffb77fc9 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -766,11 +766,11 @@ class BaseOperator(object):
def _decompress_image_file(self, file_path, outfile_path):
command = "/usr/bin/gunzip -c %s > %s" % (file_path, outfile_path)
- output = self.run_vios_command_as_root(command)
+ self.run_vios_command_as_root(command)
# Remove compressed image file
command = "/usr/bin/rm %s" % file_path
- output = self.run_vios_command_as_root(command)
+ self.run_vios_command_as_root(command)
return outfile_path
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 3cf9d32b4..8389b2c3d 100755
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -127,6 +127,12 @@ class Failure(Exception):
class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
+ # VMwareAPI has both ESXi and vCenter API sets.
+    # The ESXi API is a proper subset of the vCenter API.
+    # That is to say, nearly all valid ESXi calls are also
+    # valid vCenter calls. There are some small edge-case
+    # exceptions regarding VNC, CIM, user management and SSO.
+
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareESXDriver, self).__init__(virtapi)
@@ -218,9 +224,10 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Power off the specified instance."""
self._vmops.power_off(instance)
- def power_on(self, instance):
+ def power_on(self, context, instance, network_info,
+ block_device_info=None):
"""Power on the specified instance."""
- self._vmops.power_on(instance)
+ self._vmops._power_on(instance)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
@@ -337,6 +344,14 @@ class VMwareESXDriver(driver.ComputeDriver):
class VMwareVCDriver(VMwareESXDriver):
"""The ESX host connection object."""
+    # The vCenter driver includes several additional VMware vSphere
+    # capabilities, including APIs that act on hosts or groups of
+    # hosts in clusters or non-cluster logical groupings.
+    #
+    # vCenter is not a hypervisor itself; it works with multiple
+    # hypervisor host machines and their guests. This fact can
+    # subtly alter how vSphere and OpenStack interoperate.
+
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi)
self._cluster_name = CONF.vmwareapi_cluster_name
@@ -379,7 +394,8 @@ class VMwareVCDriver(VMwareESXDriver):
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize, powering back on the instance."""
- self._vmops.finish_revert_migration(instance, power_on)
+ self._vmops.finish_revert_migration(instance, network_info,
+ block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
@@ -387,7 +403,7 @@ class VMwareVCDriver(VMwareESXDriver):
"""Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
- power_on)
+ block_device_info, power_on)
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False,
@@ -397,6 +413,14 @@ class VMwareVCDriver(VMwareESXDriver):
post_method, recover_method,
block_migration)
+ def get_vnc_console(self, instance):
+ """Return link to instance's VNC console using vCenter logic."""
+ # In this situation, ESXi and vCenter require different
+ # API logic to create a valid VNC console connection object.
+        # Specifically, vCenter does not actually run the VNC service
+        # itself; you must talk to the VNC host underneath vCenter.
+ return self._vmops.get_vnc_console_vcenter(instance)
+
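For reference, the connection object that ultimately comes back (see get_vnc_console and get_vnc_console_vcenter in vmops.py below) is a plain dict; under vCenter the host field is rewritten to the ESX host actually running the VM. Values here are hypothetical:

    console = {'host': 'esx-host-01.example.com',
               'port': 5901,
               'internal_access_path': None}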
class VMwareAPISession(object):
"""
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index 83c53e5cb..abf896c79 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -21,6 +21,7 @@
A fake VMware VI API implementation.
"""
+import collections
import pprint
import uuid
@@ -30,7 +31,7 @@ from nova.virt.vmwareapi import error_util
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
- 'files']
+ 'files', 'ClusterComputeResource']
_FAKE_FILE_SIZE = 1024
@@ -61,6 +62,7 @@ def reset():
create_datacenter()
create_datastore()
create_res_pool()
+ create_cluster()
def cleanup():
@@ -82,22 +84,72 @@ def _get_objects(obj_type):
return lst_objs
-class Prop(object):
+class Property(object):
"""Property Object base class."""
- def __init__(self):
- self.name = None
- self.val = None
+ def __init__(self, name=None, val=None):
+ self.name = name
+ self.val = val
+
+
+class ManagedObjectReference(object):
+ """A managed object reference is a remote identifier."""
+
+ def __init__(self, value="object-123", _type="ManagedObject"):
+        super(ManagedObjectReference, self).__init__()
+        # Managed Object Reference value attributes
+        # typically have values like vm-123 or
+        # host-232, not UUIDs.
+        self.value = value
+        # The _type attribute holds the name of the
+        # vCenter object type that the value
+        # attribute identifies.
+        self._type = _type
+
+
+class ObjectContent(object):
+ """ObjectContent array holds dynamic properties."""
+
+ # This class is a *fake* of a class sent back to us by
+ # SOAP. It has its own names. These names are decided
+ # for us by the API we are *faking* here.
+ def __init__(self, obj_ref, prop_list=None, missing_list=None):
+ self.obj = obj_ref
+
+ if not isinstance(prop_list, collections.Iterable):
+ prop_list = []
+
+ if not isinstance(missing_list, collections.Iterable):
+ missing_list = []
+
+        # propSet keeps the name the real API uses, since
+        # that is the name calling code will expect
+        self.propSet = prop_list
+
+        # likewise, missingSet keeps the name the
+        # API we are talking to will use.
+        self.missingSet = missing_list
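Together, Property, ManagedObjectReference and ObjectContent reproduce the shape of a property-collector result. A small usage sketch using only the classes defined above:

    ref = ManagedObjectReference(value='vm-123', _type='VirtualMachine')
    result = ObjectContent(ref, prop_list=[Property('name', 'fake-vm')])
    assert result.obj is ref
    assert result.propSet[0].val == 'fake-vm'
    assert result.missingSet == []  # None is not iterable, so it defaults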
class ManagedObject(object):
- """Managed Data Object base class."""
+ """Managed Object base class."""
- def __init__(self, name="ManagedObject", obj_ref=None):
+ def __init__(self, name="ManagedObject", obj_ref=None, value=None):
"""Sets the obj property which acts as a reference to the object."""
super(ManagedObject, self).__setattr__('objName', name)
+
+ # A managed object is a local representation of a
+ # remote object that you can reference using the
+ # object reference.
if obj_ref is None:
- obj_ref = str(uuid.uuid4())
+ if value is None:
+ value = 'obj-123'
+ obj_ref = ManagedObjectReference(value, name)
+
+        # we use object.__setattr__ here because the default
+        # setter has been overridden for this class below.
object.__setattr__(self, 'obj', obj_ref)
object.__setattr__(self, 'propSet', [])
@@ -117,16 +169,20 @@ class ManagedObject(object):
return self.__getattr__(attr)
def __setattr__(self, attr, val):
+        # TODO(hartsocks): this adds unnecessary complexity to the class
for prop in self.propSet:
if prop.name == attr:
prop.val = val
return
- elem = Prop()
+ elem = Property()
elem.name = attr
elem.val = val
self.propSet.append(elem)
def __getattr__(self, attr):
+        # TODO(hartsocks): remove this.
+        # In a real ManagedObject you have to iterate the propSet,
+        # and the propSet is a *set*, not a list.
for elem in self.propSet:
if elem.name == attr:
return elem.val
@@ -141,6 +197,10 @@ class DataObject(object):
self.obj_name = obj_name
+class HostInternetScsiHba(object):
+ pass
+
+
class VirtualDisk(DataObject):
"""
Virtual Disk class.
@@ -186,7 +246,7 @@ class VirtualMachine(ManagedObject):
"""Virtual Machine class."""
def __init__(self, **kwargs):
- super(VirtualMachine, self).__init__("VirtualMachine")
+ super(VirtualMachine, self).__init__("VirtualMachine", value='vm-10')
self.set("name", kwargs.get("name"))
self.set("runtime.connectionState",
kwargs.get("conn_state", "connected"))
@@ -204,6 +264,8 @@ class VirtualMachine(ManagedObject):
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("config.hardware.device", kwargs.get("virtual_device", None))
self.set("config.extraConfig", kwargs.get("extra_config", None))
+ self.set('runtime.host',
+ ManagedObjectReference(value='host-123', _type="HostSystem"))
self.device = kwargs.get("virtual_device")
def reconfig(self, factory, val):
@@ -250,6 +312,25 @@ class ResourcePool(ManagedObject):
self.set("name", "ResPool")
+class ClusterComputeResource(ManagedObject):
+ """Cluster class."""
+ def __init__(self, **kwargs):
+ super(ClusterComputeResource, self).__init__("ClusterComputeResource",
+ value="domain-test")
+ r_pool = DataObject()
+ r_pool.ManagedObjectReference = [_get_objects("ResourcePool")[0].obj]
+ self.set("resourcePool", r_pool)
+
+ host_sys = DataObject()
+ host_sys.ManagedObjectReference = [_get_objects("HostSystem")[0].obj]
+ self.set("host", host_sys)
+ self.set("name", "test_cluster")
+
+ datastore = DataObject()
+ datastore.ManagedObjectReference = [_get_objects("Datastore")[0].obj]
+ self.set("datastore", datastore)
+
+
class Datastore(ManagedObject):
"""Datastore class."""
@@ -280,8 +361,8 @@ class HostNetworkSystem(ManagedObject):
class HostSystem(ManagedObject):
"""Host System class."""
- def __init__(self):
- super(HostSystem, self).__init__("HostSystem")
+ def __init__(self, obj_ref=None, value='host-123'):
+ super(HostSystem, self).__init__("HostSystem", obj_ref, value)
self.set("name", "ha-host")
if _db_content.get("HostNetworkSystem", None) is None:
create_host_network_system()
@@ -347,6 +428,17 @@ class HostSystem(ManagedObject):
host_pg.HostPortGroup = [host_pg_do]
self.set("config.network.portgroup", host_pg)
+ config = DataObject()
+ storageDevice = DataObject()
+
+ hostBusAdapter = HostInternetScsiHba()
+ hostBusAdapter.HostHostBusAdapter = [hostBusAdapter]
+ hostBusAdapter.iScsiName = "iscsi-name"
+ storageDevice.hostBusAdapter = hostBusAdapter
+ config.storageDevice = storageDevice
+ self.set("config.storageDevice.hostBusAdapter",
+ config.storageDevice.hostBusAdapter)
+
def _add_port_group(self, spec):
"""Adds a port group to the host system object in the db."""
pg_name = spec.name
@@ -429,6 +521,11 @@ def create_network():
_create_object('Network', network)
+def create_cluster():
+ cluster = ClusterComputeResource()
+ _create_object('ClusterComputeResource', cluster)
+
+
def create_task(task_name, state="running"):
task = Task(task_name, state)
_create_object("Task", task)
@@ -682,6 +779,8 @@ class FakeVim(object):
spec_set = kwargs.get("specSet")[0]
type = spec_set.propSet[0].type
properties = spec_set.propSet[0].pathSet
+ if not isinstance(properties, list):
+ properties = properties.split()
objs = spec_set.objectSet
lst_ret_objs = []
for obj in objs:
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index fecac5bcc..d8e063cad 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -20,6 +20,7 @@ The VMware API VM utility module to build SOAP object specs.
"""
import copy
+
from nova import exception
from nova.virt.vmwareapi import vim_util
@@ -50,6 +51,10 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
config_spec.name = instance['uuid']
config_spec.guestId = os_type
+    # Allow nested ESX instances to host 64-bit VMs.
+ if os_type == "vmkernel5Guest":
+ config_spec.nestedHVEnabled = "True"
+
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
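nestedHVEnabled corresponds to the VirtualMachineConfigSpec flag introduced with vSphere 5 that exposes hardware-assisted virtualization to the guest, which a nested ESXi guest needs to run 64-bit VMs of its own. A standalone restatement of the guard above:

    os_type = 'vmkernel5Guest'  # vSphere's identifier for ESXi 5.x guests
    spec_attrs = {}
    if os_type == "vmkernel5Guest":
        spec_attrs['nestedHVEnabled'] = "True"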
@@ -522,6 +527,111 @@ def get_vm_ref(session, instance):
return vm_ref
+def get_host_ref_from_id(session, host_id, property_list=None):
+ """Get a host reference object for a host_id string."""
+
+ if property_list is None:
+ property_list = ['name']
+
+ host_refs = session._call_method(
+ vim_util, "get_objects",
+ "HostSystem", property_list)
+
+ for ref in host_refs:
+ if ref.obj.value == host_id:
+ return ref
+
+
+def get_host_id_from_vm_ref(session, vm_ref):
+ """
+    This method allows you to find the managed object
+    ID of the host running a VM. Since vMotion can
+    change this value, do not cache it for long and
+    be prepared for it to change.
+
+ :param session: a vSphere API connection
+ :param vm_ref: a reference object to the running VM
+ :return: the host_id running the virtual machine
+ """
+
+ # to prevent typographical errors below
+ property_name = 'runtime.host'
+
+    # a property collector in the VMware vSphere Management API
+    # is a set of local representations of remote values.
+    # property_set here is a local representation of the
+    # properties we are querying for.
+ property_set = session._call_method(
+ vim_util, "get_object_properties",
+ None, vm_ref, vm_ref._type, [property_name])
+
+ prop = property_from_property_set(
+ property_name, property_set)
+
+ if prop is not None:
+ prop = prop.val.value
+ else:
+ # reaching here represents an impossible state
+ raise RuntimeError(
+ "Virtual Machine %s exists without a runtime.host!"
+ % (vm_ref))
+
+ return prop
+
+
+def property_from_property_set(property_name, property_set):
+    """
+    Use this method to filter property collector results.
+
+    Because network traffic is expensive, multiple
+    VMwareAPI calls will sometimes pile up properties
+    to be collected. That means results may contain
+    many different values for multiple purposes.
+
+    This helper will filter a list for a single result
+    and filter the properties of that result to find
+    the single value of whatever type resides in that
+    result. This could be a ManagedObjectReference ID
+    or a complex value.
+
+    :param property_name: name of property you want
+    :param property_set: all results from query
+    :return: the value of the property.
+    """
+
+ for prop in property_set:
+ p = _property_from_propSet(prop.propSet, property_name)
+ if p is not None:
+ return p
+
+
+def _property_from_propSet(propSet, name='name'):
+ for p in propSet:
+ if p.name == name:
+ return p
+
+
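These two filters can be exercised against the fake classes from nova/virt/vmwareapi/fake.py shown earlier; purely illustrative, since in production the property_set comes back from get_object_properties:

    host_ref = ManagedObjectReference('host-123', 'HostSystem')
    oc = ObjectContent(host_ref,
                       prop_list=[Property('runtime.host', host_ref)])
    prop = property_from_property_set('runtime.host', [oc])
    assert prop.val.value == 'host-123'  # what get_host_id_from_vm_ref returns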
+def get_host_ref_for_vm(session, instance, props):
+ """Get the ESXi host running a VM by its name."""
+
+ vm_ref = get_vm_ref(session, instance)
+ host_id = get_host_id_from_vm_ref(session, vm_ref)
+ return get_host_ref_from_id(session, host_id, props)
+
+
+def get_host_name_for_vm(session, instance):
+ """Get the ESXi host running a VM by its name."""
+ host_ref = get_host_ref_for_vm(session, instance, ['name'])
+ return get_host_name_from_host_ref(host_ref)
+
+
+def get_host_name_from_host_ref(host_ref):
+ p = _property_from_propSet(host_ref.propSet)
+ if p is not None:
+ return p.val
+
+
def get_cluster_ref_from_name(session, cluster_name):
"""Get reference to the cluster with the name specified."""
cls = session._call_method(vim_util, "get_objects",
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index c58aac219..e8f63f1d7 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -807,7 +807,7 @@ class VMwareVMOps(object):
instance['name'] = instance['name'] + self._rescue_suffix
self.destroy(instance, None)
instance['name'] = instance_orig_name
- self.power_on(instance)
+ self._power_on(instance)
def power_off(self, instance):
"""Power off the specified instance."""
@@ -832,7 +832,7 @@ class VMwareVMOps(object):
LOG.debug(_("VM was already in powered off state. So returning "
"without doing anything"), instance=instance)
- def power_on(self, instance):
+ def _power_on(self, instance):
"""Power on the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
@@ -851,6 +851,9 @@ class VMwareVMOps(object):
self._session._wait_for_task(instance['uuid'], poweron_task)
LOG.debug(_("Powered on the VM"), instance=instance)
+ def power_on(self, context, instance, network_info, block_device_info):
+ self._power_on(instance)
+
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
@@ -954,7 +957,8 @@ class VMwareVMOps(object):
if network_info:
self.unplug_vifs(instance, network_info)
- def finish_revert_migration(self, instance, power_on=True):
+ def finish_revert_migration(self, instance, network_info,
+ block_device_info, power_on=True):
"""Finish reverting a resize."""
# The original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
@@ -972,15 +976,15 @@ class VMwareVMOps(object):
LOG.debug(_("Renamed the VM from %s") % name_label,
instance=instance)
if power_on:
- self.power_on(instance)
+ self._power_on(instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
- power_on=True):
+ block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
# 4. Start VM
if power_on:
- self.power_on(instance)
+ self._power_on(instance)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
@@ -1085,6 +1089,26 @@ class VMwareVMOps(object):
'port': self._get_vnc_port(vm_ref),
'internal_access_path': None}
+ def get_vnc_console_vcenter(self, instance):
+ """Return connection info for a vnc console using vCenter logic."""
+
+ # vCenter does not run virtual machines and does not run
+ # a VNC proxy. Instead, you need to tell OpenStack to talk
+ # directly to the ESX host running the VM you are attempting
+ # to connect to via VNC.
+
+ vnc_console = self.get_vnc_console(instance)
+ host_name = vm_util.get_host_name_for_vm(
+ self._session,
+ instance)
+ vnc_console['host'] = host_name
+
+        # NOTE: a VM can move hosts in some situations; log at debug
+        # so admins can track its current location.
+        LOG.debug(_("VM %(uuid)s is currently on host %(host_name)s"),
+                  {'uuid': instance['uuid'], 'host_name': host_name})
+
+ return vnc_console
+
@staticmethod
def _get_vnc_port(vm_ref):
"""Return VNC port for an VM."""
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index c9e011856..0bd6d776e 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -27,6 +27,7 @@ from nova.api.metadata import password
from nova.compute import api as compute_api
from nova import context
from nova import crypto
+from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
@@ -77,7 +78,7 @@ CONF.register_opts(xenapi_agent_opts)
def _call_agent(session, instance, vm_ref, method, addl_args=None,
- timeout=None):
+ timeout=None, success_code='0'):
"""Abstracts out the interaction with the agent xenapi plugin."""
if addl_args is None:
addl_args = {}
@@ -101,43 +102,39 @@ def _call_agent(session, instance, vm_ref, method, addl_args=None,
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'),
{'method': method, 'args': args}, instance=instance)
- return {'returncode': 'timeout', 'message': err_msg}
+ raise exception.AgentTimeout(method=method)
elif 'NOT IMPLEMENTED:' in err_msg:
- LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
- ' supported by the agent. args=%(args)r'),
+ LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not '
+ 'supported by the agent. args=%(args)r'),
{'method': method, 'args': args}, instance=instance)
- return {'returncode': 'notimplemented', 'message': err_msg}
+ raise exception.AgentNotImplemented(method=method)
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'),
{'method': method, 'args': args, 'e': e},
instance=instance)
- return {'returncode': 'error', 'message': err_msg}
- return None
+ raise exception.AgentError(method=method)
- if isinstance(ret, dict):
- return ret
- try:
- return jsonutils.loads(ret)
- except TypeError:
- LOG.error(_('The agent call to %(method)s returned an invalid '
- 'response: %(ret)r. args=%(args)r'),
+ if not isinstance(ret, dict):
+ try:
+ ret = jsonutils.loads(ret)
+ except TypeError:
+ LOG.error(_('The agent call to %(method)s returned an invalid '
+ 'response: %(ret)r. args=%(args)r'),
+ {'method': method, 'ret': ret, 'args': args},
+ instance=instance)
+ raise exception.AgentError(method=method)
+
+ if ret['returncode'] != success_code:
+        LOG.error(_('The agent call to %(method)s returned '
+                    'an error: %(ret)r. args=%(args)r'),
{'method': method, 'ret': ret, 'args': args},
instance=instance)
- return {'returncode': 'error',
- 'message': 'unable to deserialize response'}
-
-
-def _get_agent_version(session, instance, vm_ref):
- resp = _call_agent(session, instance, vm_ref, 'version')
- if resp['returncode'] != '0':
- LOG.error(_('Failed to query agent version: %r'),
- resp, instance=instance)
- return None
+ raise exception.AgentError(method=method)
# Some old versions of the Windows agent have a trailing \\r\\n
# (ie CRLF escaped) for some reason. Strip that off.
- return resp['message'].replace('\\r\\n', '')
+ return ret['message'].replace('\\r\\n', '')
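The CRLF being stripped is the escaped form, i.e. the literal four characters backslash-r-backslash-n, not a real carriage return and newline; the fake agent's _plugin_agent_version below now returns such a suffix to exercise this path. For illustration:

    '1.0\\r\\n'.replace('\\r\\n', '')  # -> '1.0'
    '1.0\r\n'.replace('\\r\\n', '')    # unchanged: a real CRLF is not matched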
class XenAPIBasedAgent(object):
@@ -147,6 +144,11 @@ class XenAPIBasedAgent(object):
self.instance = instance
self.vm_ref = vm_ref
+ def _call_agent(self, method, addl_args=None, timeout=None,
+ success_code='0'):
+ return _call_agent(self.session, self.instance, self.vm_ref,
+ method, addl_args, timeout, success_code)
+
def get_agent_version(self):
"""Get the version of the agent running on the VM instance."""
@@ -159,31 +161,47 @@ class XenAPIBasedAgent(object):
# normal as well as watch for domid changes
expiration = time.time() + CONF.agent_version_timeout
- while time.time() < expiration:
- ret = _get_agent_version(self.session, self.instance, self.vm_ref)
- if ret:
- return ret
-
- LOG.info(_('Reached maximum time attempting to query agent version'),
- instance=self.instance)
-
- return None
+ while True:
+ try:
+ return self._call_agent('version')
+ except exception.AgentTimeout:
+ if time.time() > expiration:
+ raise
def agent_update(self, agent_build):
"""Update agent on the VM instance."""
- LOG.info(_('Updating agent to %s'), agent_build['version'],
- instance=self.instance)
+ LOG.debug(_('Updating agent to %s'), agent_build['version'],
+ instance=self.instance)
# Send the encrypted password
args = {'url': agent_build['url'], 'md5sum': agent_build['md5hash']}
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'agentupdate', args)
- if resp['returncode'] != '0':
- LOG.error(_('Failed to update agent: %r'), resp,
- instance=self.instance)
- return None
- return resp['message']
+ try:
+ self._call_agent('agentupdate', args)
+ except exception.AgentError as exc:
+            # Swallow errors from agent upgrades, but log a warning
+            LOG.warning(_("Unable to update the agent due "
+                          "to: %(exc)s"), dict(exc=exc),
+ instance=self.instance)
+
+ def _exchange_key_with_agent(self):
+ dh = SimpleDH()
+ args = {'pub': str(dh.get_public())}
+ resp = self._call_agent('key_init', args, success_code='D0')
+ agent_pub = int(resp)
+ dh.compute_shared(agent_pub)
+ return dh
+
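The handshake is ordinary Diffie-Hellman: both ends derive the same shared secret, and the password is then symmetrically encrypted. A hedged sketch that simulates the agent side with a second SimpleDH instance (SimpleDH is defined later in this module; the password is a placeholder):

    nova_dh = SimpleDH()
    agent_dh = SimpleDH()
    nova_dh.compute_shared(agent_dh.get_public())
    agent_dh.compute_shared(nova_dh.get_public())
    enc_pass = nova_dh.encrypt('new-password\n')
    # holding the same shared secret, the agent can now decrypt enc_pass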
+ def _save_instance_password_if_sshkey_present(self, new_pass):
+ sshkey = self.instance.get('key_data')
+ if sshkey:
+ ctxt = context.get_admin_context()
+ enc = crypto.ssh_encrypt_text(sshkey, new_pass)
+ sys_meta = utils.instance_sys_meta(self.instance)
+ sys_meta.update(password.convert_password(ctxt,
+ base64.b64encode(enc)))
+ self.virtapi.instance_update(ctxt, self.instance['uuid'],
+ {'system_metadata': sys_meta})
def set_admin_password(self, new_pass):
"""Set the root/admin password on the VM instance.
@@ -196,59 +214,24 @@ class XenAPIBasedAgent(object):
"""
LOG.debug(_('Setting admin password'), instance=self.instance)
- dh = SimpleDH()
-
- # Exchange keys
- args = {'pub': str(dh.get_public())}
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'key_init', args)
-
- # Successful return code from key_init is 'D0'
- if resp['returncode'] != 'D0':
- msg = _('Failed to exchange keys: %r') % resp
- LOG.error(msg, instance=self.instance)
- raise NotImplementedError(msg)
-
- # Some old versions of the Windows agent have a trailing \\r\\n
- # (ie CRLF escaped) for some reason. Strip that off.
- agent_pub = int(resp['message'].replace('\\r\\n', ''))
- dh.compute_shared(agent_pub)
-
+ dh = self._exchange_key_with_agent()
# Some old versions of Linux and Windows agent expect trailing \n
# on password to work correctly.
enc_pass = dh.encrypt(new_pass + '\n')
- # Send the encrypted password
args = {'enc_pass': enc_pass}
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'password', args)
-
- # Successful return code from password is '0'
- if resp['returncode'] != '0':
- msg = _('Failed to exchange keys: %r') % resp
- LOG.error(msg, instance=self.instance)
- raise NotImplementedError(msg)
-
- sshkey = self.instance.get('key_data')
- if sshkey:
- ctxt = context.get_admin_context()
- enc = crypto.ssh_encrypt_text(sshkey, new_pass)
- sys_meta = utils.metadata_to_dict(self.instance['system_metadata'])
- sys_meta.update(password.convert_password(ctxt,
- base64.b64encode(enc)))
- self.virtapi.instance_update(ctxt, self.instance['uuid'],
- {'system_metadata': sys_meta})
-
- return resp['message']
+ self._call_agent('password', args)
+ self._save_instance_password_if_sshkey_present(new_pass)
def inject_ssh_key(self):
sshkey = self.instance.get('key_data')
if not sshkey:
return
if self.instance['os_type'] == 'windows':
- LOG.warning(_("Skipping setting of ssh key for Windows."),
- instance=self.instance)
+ LOG.debug(_("Skipping setting of ssh key for Windows."),
+ instance=self.instance)
return
+
sshkey = str(sshkey)
keyfile = '/root/.ssh/authorized_keys'
key_data = ''.join([
@@ -268,30 +251,13 @@ class XenAPIBasedAgent(object):
b64_contents = base64.b64encode(contents)
args = {'b64_path': b64_path, 'b64_contents': b64_contents}
-
- # If the agent doesn't support file injection, a NotImplementedError
- # will be raised with the appropriate message.
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'inject_file', args)
- if resp['returncode'] != '0':
- LOG.error(_('Failed to inject file: %r'), resp,
- instance=self.instance)
- return None
-
- return resp['message']
+ return self._call_agent('inject_file', args)
def resetnetwork(self):
LOG.debug(_('Resetting network'), instance=self.instance)
- resp = _call_agent(
- self.session, self.instance, self.vm_ref, 'resetnetwork',
- timeout=CONF.agent_resetnetwork_timeout)
- if resp['returncode'] != '0':
- LOG.error(_('Failed to reset network: %r'), resp,
- instance=self.instance)
- return None
-
- return resp['message']
+ return self._call_agent('resetnetwork',
+ timeout=CONF.agent_resetnetwork_timeout)
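With this refactor, callers deal in exceptions rather than returncode dictionaries. A sketch of the calling convention (constructor arguments assumed from the class definition above):

    agent = XenAPIBasedAgent(session, virtapi, instance, vm_ref)
    try:
        version = agent.get_agent_version()
    except exception.AgentTimeout:
        version = None  # the agent never answered within the window
    except exception.AgentError:
        version = None  # the call failed or returned a bad payload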
def find_guest_agent(base_dir):
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 12a087929..37aed9812 100755
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -266,7 +266,8 @@ class XenAPIDriver(driver.ComputeDriver):
"""Power off the specified instance."""
self._vmops.power_off(instance)
- def power_on(self, instance):
+ def power_on(self, context, instance, network_info,
+ block_device_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 2dd9765d1..f4eac3887 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -565,7 +565,7 @@ class SessionBase(object):
return 12 * 1024 * 1024 * 1024
def _plugin_agent_version(self, method, args):
- return as_json(returncode='0', message='1.0')
+ return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
@@ -579,6 +579,13 @@ class SessionBase(object):
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
+ def _plugin_agent_agentupdate(self, method, args):
+ url = args["url"]
+ md5 = args["md5sum"]
+ message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
+ md5=md5)
+ return as_json(returncode='0', message=message)
+
def _plugin_noop(self, method, args):
return ''
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ff6f7f266..6e9f09184 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -1017,7 +1017,7 @@ def _create_image(context, session, instance, name_label, image_id,
elif cache_images == 'all':
cache = True
elif cache_images == 'some':
- sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.instance_sys_meta(instance)
try:
cache = strutils.bool_from_string(sys_meta['image_cache_in_nova'])
except KeyError:
@@ -1112,7 +1112,7 @@ def _image_uses_bittorrent(context, instance):
if xenapi_torrent_images == 'all':
bittorrent = True
elif xenapi_torrent_images == 'some':
- sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta = utils.instance_sys_meta(instance)
try:
bittorrent = strutils.bool_from_string(
sys_meta['image_bittorrent'])
@@ -1142,7 +1142,7 @@ def _fetch_vhd_image(context, session, instance, image_id):
if _image_uses_bittorrent(context, instance):
plugin_name = 'bittorrent'
callback = None
- _add_bittorrent_params(params)
+ _add_bittorrent_params(image_id, params)
else:
plugin_name = 'glance'
callback = _generate_glance_callback(context)
@@ -1180,20 +1180,18 @@ def _generate_glance_callback(context):
return pick_glance
-def _add_bittorrent_params(params):
- params['torrent_base_url'] = CONF.xenapi_torrent_base_url
- params['torrent_seed_duration'] = CONF.xenapi_torrent_seed_duration
- params['torrent_seed_chance'] = CONF.xenapi_torrent_seed_chance
- params['torrent_max_last_accessed'] =\
- CONF.xenapi_torrent_max_last_accessed
- params['torrent_listen_port_start'] =\
- CONF.xenapi_torrent_listen_port_start
- params['torrent_listen_port_end'] =\
- CONF.xenapi_torrent_listen_port_end
- params['torrent_download_stall_cutoff'] =\
- CONF.xenapi_torrent_download_stall_cutoff
- params['torrent_max_seeder_processes_per_host'] =\
- CONF.xenapi_torrent_max_seeder_processes_per_host
+def _add_bittorrent_params(image_id, params):
+ params['torrent_url'] = urlparse.urljoin(CONF.xenapi_torrent_base_url,
+ "%s.torrent" % image_id)
+ params['torrent_seed_duration'] = CONF.xenapi_torrent_seed_duration
+ params['torrent_seed_chance'] = CONF.xenapi_torrent_seed_chance
+ params['torrent_max_last_accessed'] = CONF.xenapi_torrent_max_last_accessed
+ params['torrent_listen_port_start'] = CONF.xenapi_torrent_listen_port_start
+ params['torrent_listen_port_end'] = CONF.xenapi_torrent_listen_port_end
+ params['torrent_download_stall_cutoff'] = \
+ CONF.xenapi_torrent_download_stall_cutoff
+ params['torrent_max_seeder_processes_per_host'] = \
+ CONF.xenapi_torrent_max_seeder_processes_per_host
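The new torrent_url is derived with urljoin, which grafts the final path segment onto the base; a base URL without a trailing slash would have its last segment replaced, so CONF.xenapi_torrent_base_url should end in '/'. A quick illustration (URLs hypothetical):

    import urlparse
    urlparse.urljoin('http://torrents.example.com/images/', 'abc.torrent')
    # -> 'http://torrents.example.com/images/abc.torrent'
    urlparse.urljoin('http://torrents.example.com/images', 'abc.torrent')
    # -> 'http://torrents.example.com/abc.torrent' (last segment replaced)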
def _get_vdi_chain_size(session, vdi_uuid):
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 7a0b4a67e..853bc3262 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -233,8 +233,16 @@ def parse_volume_info(connection_data):
target_host = _get_target_host(target_portal)
target_port = _get_target_port(target_portal)
target_iqn = connection_data['target_iqn']
- LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
- (volume_id, target_host, target_port, target_iqn))
+
+ log_params = {
+ "vol_id": volume_id,
+ "host": target_host,
+ "port": target_port,
+ "iqn": target_iqn
+ }
+ LOG.debug(_('(vol_id,host,port,iqn): '
+ '(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)'), log_params)
+
if (volume_id is None or
target_host is None or
target_iqn is None):