-rw-r--r--  etc/nova/nova.conf.sample                                           |   6
-rw-r--r--  nova/api/openstack/compute/contrib/coverage_ext.py                  |   4
-rw-r--r--  nova/cmd/manage.py                                                  |  60
-rw-r--r--  nova/compute/api.py                                                 |  28
-rw-r--r--  nova/db/sqlalchemy/api.py                                           |  52
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/177_add_floating_ip_uc.py |  40
-rw-r--r--  nova/scheduler/host_manager.py                                      |  66
-rw-r--r--  nova/scheduler/manager.py                                           |   4
-rw-r--r--  nova/scheduler/rpcapi.py                                            |   3
-rw-r--r--  nova/tests/network/test_manager.py                                  |   9
-rw-r--r--  nova/tests/scheduler/test_host_manager.py                           |  97
-rw-r--r--  nova/tests/scheduler/test_rpcapi.py                                 |   4
-rw-r--r--  nova/tests/test_db_api.py                                           | 440
-rw-r--r--  nova/tests/test_migrations.py                                       |  30
-rw-r--r--  nova/tests/test_powervm.py                                          |  22
-rw-r--r--  nova/tests/test_vmwareapi.py                                        |  46
-rw-r--r--  nova/virt/powervm/operator.py                                       |   3
-rw-r--r--  nova/virt/vmwareapi/vim.py                                          |  49
18 files changed, 835 insertions, 128 deletions
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 4d8fabd00..762576e84 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -2256,10 +2256,8 @@
# Options defined in nova.virt.vmwareapi.vim
#
-# VIM Service WSDL Location e.g
-# http://<server>/vimService.wsdl. Due to a bug in vSphere ESX
-# 4.1 default wsdl. Refer readme-vmware to setup (string
-# value)
+# Optional VIM Service WSDL Location e.g
+# http://<server>/vimService.wsdl
#vmwareapi_wsdl_loc=<None>
diff --git a/nova/api/openstack/compute/contrib/coverage_ext.py b/nova/api/openstack/compute/contrib/coverage_ext.py
index 50003bdab..154699470 100644
--- a/nova/api/openstack/compute/contrib/coverage_ext.py
+++ b/nova/api/openstack/compute/contrib/coverage_ext.py
@@ -42,7 +42,7 @@ CONF = cfg.CONF
class CoverageController(object):
"""The Coverage report API controller for the OpenStack API."""
def __init__(self):
- self.data_path = tempfile.mkdtemp(prefix='nova-coverage_')
+ self.data_path = None
self.services = []
self.combine = False
self._cover_inst = None
@@ -54,6 +54,8 @@ class CoverageController(object):
if not self._cover_inst:
try:
import coverage
+ if self.data_path is None:
+ self.data_path = tempfile.mkdtemp(prefix='nova-coverage_')
data_out = os.path.join(self.data_path, '.nova-coverage.api')
self._cover_inst = coverage.coverage(data_file=data_out)
except ImportError:
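
The hunk above defers tempfile.mkdtemp() until a coverage run is actually
requested, so merely loading the extension no longer creates a scratch
directory. A standalone sketch of the same lazy-initialization pattern
(illustrative, not Nova code):

    import os
    import tempfile

    class LazyCoverage(object):
        """Create the scratch directory on first use, not at construction."""

        def __init__(self):
            self._data_path = None

        @property
        def data_path(self):
            if self._data_path is None:
                # Runs only once, the first time coverage is requested.
                self._data_path = tempfile.mkdtemp(prefix='nova-coverage_')
            return self._data_path

    cov = LazyCoverage()      # no directory created yet
    path = cov.data_path      # directory created here
    assert os.path.isdir(path)
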
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index ee3c123a9..41b97f26d 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -78,7 +78,6 @@ from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
-from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import servicegroup
from nova import version
@@ -700,6 +699,60 @@ class ServiceCommands(object):
return(2)
print _("Service %(service)s on host %(host)s disabled.") % locals()
+ def _show_host_resources(self, context, host):
+ """Shows the physical/usage resource given by hosts.
+
+ :param context: security context
+ :param host: hostname
+ :returns:
+ example format is below::
+
+ {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
+ D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
+ 'vcpus_used': 12, 'memory_mb_used': 10240,
+ 'local_gb_used': 64}
+
+ """
+ # Getting compute node info and related instances info
+ service_ref = db.service_get_by_compute_host(context, host)
+ instance_refs = db.instance_get_all_by_host(context,
+ service_ref['host'])
+
+ # Getting total available/used resource
+ compute_ref = service_ref['compute_node'][0]
+ resource = {'vcpus': compute_ref['vcpus'],
+ 'memory_mb': compute_ref['memory_mb'],
+ 'local_gb': compute_ref['local_gb'],
+ 'vcpus_used': compute_ref['vcpus_used'],
+ 'memory_mb_used': compute_ref['memory_mb_used'],
+ 'local_gb_used': compute_ref['local_gb_used']}
+ usage = dict()
+ if not instance_refs:
+ return {'resource': resource, 'usage': usage}
+
+ # Getting usage resource per project
+ project_ids = [i['project_id'] for i in instance_refs]
+ project_ids = list(set(project_ids))
+ for project_id in project_ids:
+ vcpus = [i['vcpus'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ mem = [i['memory_mb'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ root = [i['root_gb'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ ephemeral = [i['ephemeral_gb'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ usage[project_id] = {'vcpus': sum(vcpus),
+ 'memory_mb': sum(mem),
+ 'root_gb': sum(root),
+ 'ephemeral_gb': sum(ephemeral)}
+
+ return {'resource': resource, 'usage': usage}
+
@args('--host', metavar='<host>', help='Host')
def describe_resource(self, host):
"""Describes cpu/memory/hdd info for host.
@@ -707,9 +760,8 @@ class ServiceCommands(object):
:param host: hostname.
"""
- rpcapi = scheduler_rpcapi.SchedulerAPI()
- result = rpcapi.show_host_resources(context.get_admin_context(),
- host=host)
+ result = self._show_host_resources(context.get_admin_context(),
+ host=host)
if not isinstance(result, dict):
print _('An unexpected error has occurred.')
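
The per-project aggregation in _show_host_resources can be checked in
isolation; a minimal sketch with made-up instance records standing in for
db.instance_get_all_by_host() (all values hypothetical):

    instances = [
        {'project_id': 'p1', 'vcpus': 2, 'memory_mb': 2048,
         'root_gb': 20, 'ephemeral_gb': 0},
        {'project_id': 'p1', 'vcpus': 1, 'memory_mb': 512,
         'root_gb': 10, 'ephemeral_gb': 5},
        {'project_id': 'p2', 'vcpus': 4, 'memory_mb': 4096,
         'root_gb': 40, 'ephemeral_gb': 0},
    ]

    usage = {}
    for project_id in set(i['project_id'] for i in instances):
        mine = [i for i in instances if i['project_id'] == project_id]
        usage[project_id] = {
            'vcpus': sum(i['vcpus'] for i in mine),
            'memory_mb': sum(i['memory_mb'] for i in mine),
            'root_gb': sum(i['root_gb'] for i in mine),
            'ephemeral_gb': sum(i['ephemeral_gb'] for i in mine)}

    assert usage['p1'] == {'vcpus': 3, 'memory_mb': 2560,
                           'root_gb': 30, 'ephemeral_gb': 5}
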
diff --git a/nova/compute/api.py b/nova/compute/api.py
index f4560bd0f..454fd90d9 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -403,17 +403,32 @@ class API(base.Base):
@staticmethod
def _handle_availability_zone(availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
- # via az using az:host. It might be nice to expose an
+ # via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
+ # NOTE(deva): It is also possible to specify az::node, in which case
+ # the host manager will determine the correct host.
forced_host = None
+ forced_node = None
if availability_zone and ':' in availability_zone:
- availability_zone, forced_host = availability_zone.split(':')
+ c = availability_zone.count(':')
+ if c == 1:
+ availability_zone, forced_host = availability_zone.split(':')
+ elif c == 2:
+ if '::' in availability_zone:
+ availability_zone, forced_node = \
+ availability_zone.split('::')
+ else:
+ availability_zone, forced_host, forced_node = \
+ availability_zone.split(':')
+ else:
+ raise exception.InvalidInput(
+ reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
- return availability_zone, forced_host
+ return availability_zone, forced_host, forced_node
@staticmethod
def _inherit_properties_from_image(image, auto_disk_config):
@@ -562,8 +577,8 @@ class API(base.Base):
root_device_name = block_device.properties_root_device_name(
image.get('properties', {}))
- availability_zone, forced_host = self._handle_availability_zone(
- availability_zone)
+ availability_zone, forced_host, forced_node = \
+ self._handle_availability_zone(availability_zone)
system_metadata = instance_types.save_instance_type_info(
dict(), instance_type)
@@ -611,6 +626,9 @@ class API(base.Base):
if forced_host:
check_policy(context, 'create:forced_host', {})
filter_properties['force_hosts'] = [forced_host]
+ if forced_node:
+ check_policy(context, 'create:forced_host', {})
+ filter_properties['force_nodes'] = [forced_node]
for i in xrange(num_instances):
options = base_options.copy()
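
The availability zone parsing above accepts three forms: az:host pins a
host, az:host:node pins both, and az::node pins only a node. A runnable
sketch of the same rules (InvalidInput swapped for ValueError, and the
CONF.default_schedule_zone fallback omitted, to stay standalone):

    def parse_az(availability_zone):
        forced_host = forced_node = None
        if availability_zone and ':' in availability_zone:
            c = availability_zone.count(':')
            if c == 1:
                availability_zone, forced_host = availability_zone.split(':')
            elif c == 2:
                if '::' in availability_zone:
                    availability_zone, forced_node = \
                        availability_zone.split('::')
                else:
                    availability_zone, forced_host, forced_node = \
                        availability_zone.split(':')
            else:
                raise ValueError("Unable to parse availability_zone")
        return availability_zone, forced_host, forced_node

    assert parse_az('nova') == ('nova', None, None)
    assert parse_az('nova:host1') == ('nova', 'host1', None)
    assert parse_az('nova:host1:node1') == ('nova', 'host1', 'node1')
    assert parse_az('nova::node1') == ('nova', None, 'node1')
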
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index ceb2741e7..72ade9857 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -685,21 +685,18 @@ def floating_ip_allocate_address(context, project_id, pool):
@require_context
def floating_ip_bulk_create(context, ips):
- existing_ips = {}
- for floating in _floating_ip_get_all(context).all():
- existing_ips[floating['address']] = floating
-
session = get_session()
with session.begin():
for ip in ips:
- addr = ip['address']
- if (addr in existing_ips and
- ip.get('id') != existing_ips[addr]['id']):
- raise exception.FloatingIpExists(**dict(existing_ips[addr]))
-
model = models.FloatingIp()
model.update(ip)
- session.add(model)
+ try:
+ # NOTE(boris-42): To detect a duplicate address we have to
+ # flush the session after each add.
+ session.add(model)
+ session.flush()
+ except db_exc.DBDuplicateEntry:
+ raise exception.FloatingIpExists(address=ip['address'])
def _ip_range_splitter(ips, block_size=256):
@@ -731,26 +728,13 @@ def floating_ip_bulk_destroy(context, ips):
@require_context
def floating_ip_create(context, values, session=None):
- if not session:
- session = get_session()
-
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
-
- # check uniqueness for not deleted addresses
- if not floating_ip_ref.deleted:
- try:
- floating_ip = _floating_ip_get_by_address(context,
- floating_ip_ref.address,
- session)
- except exception.FloatingIpNotFoundForAddress:
- pass
- else:
- if floating_ip.id != floating_ip_ref.id:
- raise exception.FloatingIpExists(**dict(floating_ip_ref))
-
- floating_ip_ref.save(session=session)
- return floating_ip_ref['address']
+ try:
+ floating_ip_ref.save()
+ except db_exc.DBDuplicateEntry:
+ raise exception.FloatingIpExists(address=values['address'])
+ return floating_ip_ref
@require_context
@@ -916,12 +900,12 @@ def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id, session=None):
def floating_ip_update(context, address, values):
session = get_session()
with session.begin():
- floating_ip_ref = _floating_ip_get_by_address(context,
- address,
- session)
- for (key, value) in values.iteritems():
- floating_ip_ref[key] = value
- floating_ip_ref.save(session=session)
+ float_ip_ref = _floating_ip_get_by_address(context, address, session)
+ float_ip_ref.update(values)
+ try:
+ float_ip_ref.save(session=session)
+ except db_exc.DBDuplicateEntry:
+ raise exception.FloatingIpExists(address=values['address'])
@require_context
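
The rewrites above drop the racy read-then-check logic and instead let the
new (address, deleted) unique constraint reject duplicates, translating the
driver error into FloatingIpExists. A self-contained sketch of that pattern
with plain SQLAlchemy and an in-memory SQLite database (table trimmed to the
relevant columns):

    import sqlalchemy as sa
    from sqlalchemy.exc import IntegrityError

    engine = sa.create_engine('sqlite://')
    meta = sa.MetaData()
    floating_ips = sa.Table(
        'floating_ips', meta,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('address', sa.String(39)),
        sa.Column('deleted', sa.Integer, default=0),
        sa.UniqueConstraint('address', 'deleted',
                            name='uniq_address_x_deleted'))
    meta.create_all(engine)

    with engine.begin() as conn:
        conn.execute(floating_ips.insert().values(address='1.1.1.1',
                                                  deleted=0))
        # A soft-deleted duplicate (deleted == id) is still allowed.
        conn.execute(floating_ips.insert().values(address='1.1.1.1',
                                                  deleted=2))

    try:
        with engine.begin() as conn:
            # A second live row with the same address hits the constraint.
            conn.execute(floating_ips.insert().values(address='1.1.1.1',
                                                      deleted=0))
    except IntegrityError:
        print('duplicate address')  # what Nova maps to FloatingIpExists
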
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/177_add_floating_ip_uc.py b/nova/db/sqlalchemy/migrate_repo/versions/177_add_floating_ip_uc.py
new file mode 100644
index 000000000..c0dd7c91d
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/177_add_floating_ip_uc.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy import MetaData, Table
+
+from nova.db.sqlalchemy import utils
+
+
+UC_NAME = "uniq_address_x_deleted"
+COLUMNS = ('address', 'deleted')
+TABLE_NAME = 'floating_ips'
+
+
+def upgrade(migrate_engine):
+ meta = MetaData(bind=migrate_engine)
+ t = Table(TABLE_NAME, meta, autoload=True)
+
+ utils.drop_old_duplicate_entries_from_table(migrate_engine, TABLE_NAME,
+ True, *COLUMNS)
+ uc = UniqueConstraint(*COLUMNS, table=t, name=UC_NAME)
+ uc.create()
+
+
+def downgrade(migrate_engine):
+ utils.drop_unique_constraint(migrate_engine, TABLE_NAME, UC_NAME, *COLUMNS)
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index d12f15f38..9dbe6bd67 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -300,40 +300,66 @@ class HostManager(object):
def _strip_ignore_hosts(host_map, hosts_to_ignore):
ignored_hosts = []
for host in hosts_to_ignore:
- if host in host_map:
- del host_map[host]
- ignored_hosts.append(host)
+ for (hostname, nodename) in host_map.keys():
+ if host == hostname:
+ del host_map[(hostname, nodename)]
+ ignored_hosts.append(host)
ignored_hosts_str = ', '.join(ignored_hosts)
- msg = _('Host filter ignoring hosts: %(ignored_hosts_str)s')
- LOG.debug(msg, locals())
+ msg = _('Host filter ignoring hosts: %s')
+ LOG.debug(msg % ignored_hosts_str)
def _match_forced_hosts(host_map, hosts_to_force):
- for host in host_map.keys():
- if host not in hosts_to_force:
- del host_map[host]
- if not host_map:
+ forced_hosts = []
+ for (hostname, nodename) in host_map.keys():
+ if hostname not in hosts_to_force:
+ del host_map[(hostname, nodename)]
+ else:
+ forced_hosts.append(hostname)
+ if host_map:
+ forced_hosts_str = ', '.join(forced_hosts)
+ msg = _('Host filter forcing available hosts to %s')
+ else:
forced_hosts_str = ', '.join(hosts_to_force)
- msg = _("No hosts matched due to not matching 'force_hosts'"
- "value of '%(forced_hosts_str)s'")
- LOG.debug(msg, locals())
- return
- forced_hosts_str = ', '.join(host_map.iterkeys())
- msg = _('Host filter forcing available hosts to '
- '%(forced_hosts_str)s')
- LOG.debug(msg, locals())
+ msg = _("No hosts matched due to not matching "
+ "'force_hosts' value of '%s'")
+ LOG.debug(msg % forced_hosts_str)
+
+ def _match_forced_nodes(host_map, nodes_to_force):
+ forced_nodes = []
+ for (hostname, nodename) in host_map.keys():
+ if nodename not in nodes_to_force:
+ del host_map[(hostname, nodename)]
+ else:
+ forced_nodes.append(nodename)
+ if host_map:
+ forced_nodes_str = ', '.join(forced_nodes)
+ msg = _('Host filter forcing available nodes to %s')
+ else:
+ forced_nodes_str = ', '.join(nodes_to_force)
+ msg = _("No nodes matched due to not matching "
+ "'force_nodes' value of '%s'")
+ LOG.debug(msg % forced_nodes_str)
filter_classes = self._choose_host_filters(filter_class_names)
ignore_hosts = filter_properties.get('ignore_hosts', [])
force_hosts = filter_properties.get('force_hosts', [])
- if ignore_hosts or force_hosts:
- name_to_cls_map = dict([(x.host, x) for x in hosts])
+ force_nodes = filter_properties.get('force_nodes', [])
+
+ if ignore_hosts or force_hosts or force_nodes:
+ # NOTE(deva): we can't assume "host" is unique because
+ # one host may have many nodes.
+ name_to_cls_map = dict([((x.host, x.nodename), x) for x in hosts])
if ignore_hosts:
_strip_ignore_hosts(name_to_cls_map, ignore_hosts)
if not name_to_cls_map:
return []
+ # NOTE(deva): allow force_hosts and force_nodes independently
if force_hosts:
_match_forced_hosts(name_to_cls_map, force_hosts)
- # NOTE(vish): Skip filters on forced hosts.
+ if force_nodes:
+ _match_forced_nodes(name_to_cls_map, force_nodes)
+ if force_hosts or force_nodes:
+ # NOTE(deva): Skip filters when forcing host or node
if name_to_cls_map:
return name_to_cls_map.values()
hosts = name_to_cls_map.itervalues()
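
With (host, node) tuples as keys, force_hosts and force_nodes compose as an
intersection, which the new scheduler tests below exercise. A standalone
sketch of the filtering (toy state values):

    host_map = {
        ('host1', 'node1'): 'state-1a',
        ('host1', 'node2'): 'state-1b',
        ('host2', 'node3'): 'state-2a',
    }
    force_hosts = ['host1']
    force_nodes = ['node2']

    if force_hosts:
        for key in list(host_map):  # copy keys; we delete while iterating
            if key[0] not in force_hosts:
                del host_map[key]
    if force_nodes:
        for key in list(host_map):
            if key[1] not in force_nodes:
                del host_map[key]

    assert sorted(host_map) == [('host1', 'node2')]  # only the intersection
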
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 65b171eba..ca7cd956f 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -241,9 +241,7 @@ class SchedulerManager(manager.Manager):
notifier.notify(context, notifier.publisher_id("scheduler"),
'scheduler.' + method, notifier.ERROR, payload)
- # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
- # Based on bexar design summit discussion,
- # just put this here for bexar release.
+ # NOTE(hanlind): This method can be removed in v3.0 of the RPC API.
def show_host_resources(self, context, host):
"""Shows the physical/usage resource given by hosts.
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index ac2244de4..2cfd5688f 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -96,9 +96,6 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
filter_properties=filter_properties,
reservations=reservations_p))
- def show_host_resources(self, ctxt, host):
- return self.call(ctxt, self.make_msg('show_host_resources', host=host))
-
def live_migration(self, ctxt, block_migration, disk_over_commit,
instance, dest):
# NOTE(comstud): Call vs cast so we can get exceptions back, otherwise
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 74015526e..def993f62 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -1040,10 +1040,11 @@ class VlanNetworkTestCase(test.TestCase):
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
- address = '1.2.3.4'
- float_addr = db.floating_ip_create(context1.elevated(),
- {'address': address,
- 'project_id': context1.project_id})
+ float_ip = db.floating_ip_create(context1.elevated(),
+ {'address': '1.2.3.4',
+ 'project_id': context1.project_id})
+
+ float_addr = float_ip['address']
instance = db.instance_create(context1,
{'project_id': 'project1'})
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index ddc9bd29a..c03c66c4e 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -45,6 +45,8 @@ class HostManagerTestCase(test.TestCase):
self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
'fake-node') for x in xrange(1, 5)]
+ self.fake_hosts += [host_manager.HostState('fake_multihost',
+ 'fake-node%s' % x) for x in xrange(1, 5)]
self.addCleanup(timeutils.clear_time_override)
def test_choose_host_filters_not_found(self):
@@ -115,7 +117,7 @@ class HostManagerTestCase(test.TestCase):
def test_get_filtered_hosts_with_ignore(self):
fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
- 'fake_host5']}
+ 'fake_host5', 'fake_multihost']}
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
@@ -156,7 +158,7 @@ class HostManagerTestCase(test.TestCase):
fake_properties)
self._verify_result(info, result, False)
- def test_get_filtered_hosts_with_ignore_and_force(self):
+ def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
@@ -172,6 +174,97 @@ class HostManagerTestCase(test.TestCase):
fake_properties)
self._verify_result(info, result, False)
+ def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+ # Ensure all nodes returned for a host with many nodes
+ fake_properties = {'force_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+ self.fake_hosts[6], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_nodes(self):
+ fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+ 'fake-node9']}
+
+ # [5] is fake-node2, [7] is fake-node4
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+ # Ensure only overlapping results if both force host and node
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+ 'force_nodes': ['fake-node2', 'fake-node9']}
+
+ # [5] is fake-node2
+ info = {'expected_objs': [self.fake_hosts[5]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+ # Ensure non-overlapping force_node and force_host yield no result
+ fake_properties = {'force_hosts': ['fake_multihost'],
+ 'force_nodes': ['fake-node']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+ # Ensure ignore_hosts can coexist with force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+ # Ensure ignore_hosts is processed before force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
def test_update_service_capabilities(self):
service_states = self.host_manager.service_states
self.assertEqual(len(service_states.keys()), 0)
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index 9a7615c86..44e6a91b8 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -69,10 +69,6 @@ class SchedulerRpcAPITestCase(test.TestCase):
request_spec='fake_request_spec',
filter_properties='fake_props', reservations=list('fake_res'))
- def test_show_host_resources(self):
- self._test_scheduler_api('show_host_resources', rpc_method='call',
- host='fake_host')
-
def test_live_migration(self):
self._test_scheduler_api('live_migration', rpc_method='call',
block_migration='fake_block_migration',
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index dcbdd5517..a3c281b49 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -335,36 +335,6 @@ class DbApiTestCase(DbTestCase):
self.assertEqual(0, len(results))
db.instance_update(ctxt, instance['uuid'], {"task_state": None})
- def test_multi_associate_disassociate(self):
- ctxt = context.get_admin_context()
- values = {'address': 'floating'}
- floating = db.floating_ip_create(ctxt, values)
- values = {'address': 'fixed'}
- fixed = db.fixed_ip_create(ctxt, values)
- res = db.floating_ip_fixed_ip_associate(ctxt, floating, fixed, 'foo')
- self.assertEqual(res['address'], fixed)
- res = db.floating_ip_fixed_ip_associate(ctxt, floating, fixed, 'foo')
- self.assertEqual(res, None)
- res = db.floating_ip_disassociate(ctxt, floating)
- self.assertEqual(res['address'], fixed)
- res = db.floating_ip_disassociate(ctxt, floating)
- self.assertEqual(res, None)
-
- def test_floating_ip_get_by_fixed_address(self):
- ctxt = context.get_admin_context()
- values = {'address': 'fixed'}
- fixed = db.fixed_ip_create(ctxt, values)
- fixed_ip_ref = db.fixed_ip_get_by_address(ctxt, fixed)
- values = {'address': 'floating1',
- 'fixed_ip_id': fixed_ip_ref['id']}
- floating1 = db.floating_ip_create(ctxt, values)
- values = {'address': 'floating2',
- 'fixed_ip_id': fixed_ip_ref['id']}
- floating2 = db.floating_ip_create(ctxt, values)
- floating_ip_refs = db.floating_ip_get_by_fixed_address(ctxt, fixed)
- self.assertEqual(floating1, floating_ip_refs[0]['address'])
- self.assertEqual(floating2, floating_ip_refs[1]['address'])
-
def test_network_create_safe(self):
ctxt = context.get_admin_context()
values = {'host': 'localhost', 'project_id': 'project1'}
@@ -2398,7 +2368,7 @@ class FixedIPTestCase(BaseInstanceTypeTestCase):
fixed_ip_ref = db.fixed_ip_get_by_address(self.ctxt, fixed)
values = {'address': 'floating',
'fixed_ip_id': fixed_ip_ref['id']}
- floating = db.floating_ip_create(self.ctxt, values)
+ floating = db.floating_ip_create(self.ctxt, values)['address']
fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
self.assertEqual(fixed, fixed_ip_ref['address'])
@@ -2621,6 +2591,414 @@ class FixedIPTestCase(BaseInstanceTypeTestCase):
self.assertEqual(fixed_ip['network_id'], network['id'])
+class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ def setUp(self):
+ super(FloatingIpTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_base_values(self):
+ return {
+ 'address': '1.1.1.1',
+ 'fixed_ip_id': None,
+ 'project_id': 'fake_project',
+ 'host': 'fake_host',
+ 'auto_assigned': False,
+ 'pool': 'fake_pool',
+ 'interface': 'fake_interface',
+ }
+
+ def _create_floating_ip(self, values):
+ if not values:
+ values = {}
+ vals = self._get_base_values()
+ vals.update(values)
+ return db.floating_ip_create(self.ctxt, vals)
+
+ def test_floating_ip_get(self):
+ values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
+ floating_ips = [self._create_floating_ip(val) for val in values]
+
+ for floating_ip in floating_ips:
+ real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
+ self._assertEqualObjects(floating_ip, real_floating_ip,
+ ignored_keys=['fixed_ip'])
+
+ def test_floating_ip_get_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFound,
+ db.floating_ip_get, self.ctxt, 100500)
+
+ def test_floating_ip_get_pools(self):
+ values = [
+ {'address': '0.0.0.0', 'pool': 'abc'},
+ {'address': '1.1.1.1', 'pool': 'abc'},
+ {'address': '2.2.2.2', 'pool': 'def'},
+ {'address': '3.3.3.3', 'pool': 'ghi'},
+ ]
+ for val in values:
+ self._create_floating_ip(val)
+ expected_pools = [{'name': x}
+ for x in set(map(lambda x: x['pool'], values))]
+ real_pools = db.floating_ip_get_pools(self.ctxt)
+ self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
+
+ def test_floating_ip_allocate_address(self):
+ pools = {
+ 'pool1': ['0.0.0.0', '1.1.1.1'],
+ 'pool2': ['2.2.2.2'],
+ 'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
+ }
+ for pool, addresses in pools.iteritems():
+ for address in addresses:
+ vals = {'pool': pool, 'address': address, 'project_id': None}
+ self._create_floating_ip(vals)
+
+ project_id = self._get_base_values()['project_id']
+ for pool, addresses in pools.iteritems():
+ alloc_addrs = []
+ for i in addresses:
+ float_addr = db.floating_ip_allocate_address(self.ctxt,
+ project_id, pool)
+ alloc_addrs.append(float_addr)
+ self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
+
+ def test_floating_ip_allocate_address_no_more_floating_ips(self):
+ self.assertRaises(exception.NoMoreFloatingIps,
+ db.floating_ip_allocate_address,
+ self.ctxt, 'any_project_id', 'no_such_pool')
+
+ def test_floating_ip_allocate_not_authorized(self):
+ ctxt = context.RequestContext(user_id='a', project_id='abc',
+ is_admin=False)
+ self.assertRaises(exception.NotAuthorized,
+ db.floating_ip_allocate_address,
+ ctxt, 'other_project_id', 'any_pool')
+
+ def _get_existing_ips(self):
+ return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
+
+ def test_floating_ip_bulk_create(self):
+ expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
+ db.floating_ip_bulk_create(self.ctxt,
+ map(lambda x: {'address': x}, expected_ips))
+ self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
+ expected_ips)
+
+ def test_floating_ip_bulk_create_duplicate(self):
+ ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
+ prepare_ips = lambda x: {'address': x}
+
+ db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
+ self.assertRaises(exception.FloatingIpExists,
+ db.floating_ip_bulk_create,
+ self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']))
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ db.floating_ip_get_by_address,
+ self.ctxt, '1.1.1.5')
+
+ def test_floating_ip_bulk_destroy(self):
+ ips_for_delete = []
+ ips_for_non_delete = []
+
+ def create_ips(i):
+ return [{'address': '1.1.%s.%s' % (i, k)} for k in xrange(1, 256)]
+
+ # NOTE(boris-42): Create more than 256 ips to check that
+ # _ip_range_splitter works properly.
+ for i in xrange(1, 3):
+ ips_for_delete.extend(create_ips(i))
+ ips_for_non_delete.extend(create_ips(3))
+
+ db.floating_ip_bulk_create(self.ctxt,
+ ips_for_delete + ips_for_non_delete)
+ db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
+
+ expected_addresses = map(lambda x: x['address'], ips_for_non_delete)
+ self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
+ expected_addresses)
+
+ def test_floating_ip_create(self):
+ floating_ip = self._create_floating_ip({})
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+
+ self.assertFalse(floating_ip['id'] is None)
+ self._assertEqualObjects(floating_ip, self._get_base_values(),
+ ignored_keys)
+
+ def test_floating_ip_create_duplicate(self):
+ self._create_floating_ip({})
+ self.assertRaises(exception.FloatingIpExists,
+ self._create_floating_ip, {})
+
+ def test_floating_ip_count_by_project(self):
+ projects = {
+ 'project1': ['1.1.1.1', '2.2.2.2', '3.3.3.3'],
+ 'project2': ['4.4.4.4', '5.5.5.5'],
+ 'project3': ['6.6.6.6']
+ }
+ for project_id, addresses in projects.iteritems():
+ for address in addresses:
+ self._create_floating_ip({'project_id': project_id,
+ 'address': address})
+ for project_id, addresses in projects.iteritems():
+ real_count = db.floating_ip_count_by_project(self.ctxt, project_id)
+ self.assertEqual(len(addresses), real_count)
+
+ def test_floating_ip_count_by_project_not_authorized(self):
+ ctxt = context.RequestContext(user_id='a', project_id='abc',
+ is_admin=False)
+ self.assertRaises(exception.NotAuthorized,
+ db.floating_ip_count_by_project,
+ ctxt, 'def', 'does_not_matter')
+
+ def _create_fixed_ip(self, params):
+ default_params = {'address': '192.168.0.1'}
+ default_params.update(params)
+ return db.fixed_ip_create(self.ctxt, default_params)
+
+ def test_floating_ip_fixed_ip_associate(self):
+ float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
+
+ float_ips = [self._create_floating_ip({'address': address})
+ for address in float_addresses]
+ fixed_addrs = [self._create_fixed_ip({'address': address})
+ for address in fixed_addresses]
+
+ for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
+ fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
+ float_ip.address,
+ fixed_addr, 'host')
+ self.assertEqual(fixed_ip.address, fixed_addr)
+
+ updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
+ self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
+ self.assertEqual('host', updated_float_ip.host)
+
+ # Test that already allocated float_ip returns None
+ result = db.floating_ip_fixed_ip_associate(self.ctxt,
+ float_addresses[0],
+ fixed_addresses[0], 'host')
+ self.assertTrue(result is None)
+
+ def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ db.floating_ip_fixed_ip_associate,
+ self.ctxt, 'non exist', 'some', 'some')
+
+ def test_floating_ip_deallocate(self):
+ values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
+ float_ip = self._create_floating_ip(values)
+ db.floating_ip_deallocate(self.ctxt, float_ip.address)
+
+ updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
+ self.assertTrue(updated_float_ip.project_id is None)
+ self.assertTrue(updated_float_ip.host is None)
+ self.assertFalse(updated_float_ip.auto_assigned)
+
+ def test_floating_ip_destroy(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_ips = [self._create_floating_ip({'address': addr})
+ for addr in addresses]
+
+ expected_len = len(addresses)
+ for float_ip in float_ips:
+ db.floating_ip_destroy(self.ctxt, float_ip.address)
+ self.assertRaises(exception.FloatingIpNotFound,
+ db.floating_ip_get, self.ctxt, float_ip.id)
+ expected_len -= 1
+ if expected_len > 0:
+ self.assertEqual(expected_len,
+ len(db.floating_ip_get_all(self.ctxt)))
+ else:
+ self.assertRaises(exception.NoFloatingIpsDefined,
+ db.floating_ip_get_all, self.ctxt)
+
+ def test_floating_ip_disassociate(self):
+ float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
+
+ float_ips = [self._create_floating_ip({'address': address})
+ for address in float_addresses]
+ fixed_addrs = [self._create_fixed_ip({'address': address})
+ for address in fixed_addresses]
+
+ for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
+ db.floating_ip_fixed_ip_associate(self.ctxt,
+ float_ip.address,
+ fixed_addr, 'host')
+
+ for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
+ fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
+ self.assertEqual(fixed.address, fixed_addr)
+ updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
+ self.assertTrue(updated_float_ip.fixed_ip_id is None)
+ self.assertTrue(updated_float_ip.host is None)
+
+ def test_floating_ip_disassociate_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ db.floating_ip_disassociate, self.ctxt, 'non exist')
+
+ def test_floating_ip_set_auto_assigned(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_ips = [self._create_floating_ip({'address': addr,
+ 'auto_assigned': False})
+ for addr in addresses]
+
+ for i in xrange(2):
+ db.floating_ip_set_auto_assigned(self.ctxt, float_ips[i].address)
+ for i in xrange(2):
+ float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
+ self.assertTrue(float_ip.auto_assigned)
+
+ float_ip = db.floating_ip_get(self.ctxt, float_ips[2].id)
+ self.assertFalse(float_ip.auto_assigned)
+
+ def test_floating_ip_get_all(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_ips = [self._create_floating_ip({'address': addr})
+ for addr in addresses]
+ self._assertEqualListsOfObjects(float_ips,
+ db.floating_ip_get_all(self.ctxt))
+
+ def test_floating_ip_get_all_not_found(self):
+ self.assertRaises(exception.NoFloatingIpsDefined,
+ db.floating_ip_get_all, self.ctxt)
+
+ def test_floating_ip_get_all_by_host(self):
+ hosts = {
+ 'host1': ['1.1.1.1', '1.1.1.2'],
+ 'host2': ['2.1.1.1', '2.1.1.2'],
+ 'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
+ }
+
+ hosts_with_float_ips = {}
+ for host, addresses in hosts.iteritems():
+ hosts_with_float_ips[host] = []
+ for address in addresses:
+ float_ip = self._create_floating_ip({'host': host,
+ 'address': address})
+ hosts_with_float_ips[host].append(float_ip)
+
+ for host, float_ips in hosts_with_float_ips.iteritems():
+ real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
+ self._assertEqualListsOfObjects(float_ips, real_float_ips)
+
+ def test_floating_ip_get_all_by_host_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFoundForHost,
+ db.floating_ip_get_all_by_host,
+ self.ctxt, 'non_exists_host')
+
+ def test_floating_ip_get_all_by_project(self):
+ projects = {
+ 'pr1': ['1.1.1.1', '1.1.1.2'],
+ 'pr2': ['2.1.1.1', '2.1.1.2'],
+ 'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
+ }
+
+ projects_with_float_ips = {}
+ for project_id, addresses in projects.iteritems():
+ projects_with_float_ips[project_id] = []
+ for address in addresses:
+ float_ip = self._create_floating_ip({'project_id': project_id,
+ 'address': address})
+ projects_with_float_ips[project_id].append(float_ip)
+
+ for project_id, float_ips in projects_with_float_ips.iteritems():
+ real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
+ project_id)
+ self._assertEqualListsOfObjects(float_ips, real_float_ips,
+ ignored_keys='fixed_ip')
+
+ def test_floating_ip_get_all_by_project_not_authorized(self):
+ ctxt = context.RequestContext(user_id='a', project_id='abc',
+ is_admin=False)
+ self.assertRaises(exception.NotAuthorized,
+ db.floating_ip_get_all_by_project,
+ ctxt, 'other_project')
+
+ def test_floating_ip_get_by_address(self):
+ addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_ips = [self._create_floating_ip({'address': addr})
+ for addr in addresses]
+
+ for float_ip in float_ips:
+ real_float_ip = db.floating_ip_get_by_address(self.ctxt,
+ float_ip.address)
+ self._assertEqualObjects(float_ip, real_float_ip,
+ ignored_keys='fixed_ip')
+
+ def test_floating_ip_get_by_address_not_found(self):
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ db.floating_ip_get_by_address,
+ self.ctxt, 'non_exists_host')
+
+ def test_floating_ip_get_by_fixed_address(self):
+ fixed_float = [
+ ('1.1.1.1', '2.2.2.1'),
+ ('1.1.1.2', '2.2.2.2'),
+ ('1.1.1.3', '2.2.2.3')
+ ]
+
+ for fixed_addr, float_addr in fixed_float:
+ self._create_floating_ip({'address': float_addr})
+ self._create_fixed_ip({'address': fixed_addr})
+ db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
+ fixed_addr, 'some_host')
+
+ for fixed_addr, float_addr in fixed_float:
+ float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
+ fixed_addr)
+ self.assertEqual(float_addr, float_ip[0]['address'])
+
+ def test_floating_ip_get_by_fixed_ip_id(self):
+ fixed_float = [
+ ('1.1.1.1', '2.2.2.1'),
+ ('1.1.1.2', '2.2.2.2'),
+ ('1.1.1.3', '2.2.2.3')
+ ]
+
+ for fixed_addr, float_addr in fixed_float:
+ self._create_floating_ip({'address': float_addr})
+ self._create_fixed_ip({'address': fixed_addr})
+ db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
+ fixed_addr, 'some_host')
+
+ for fixed_addr, float_addr in fixed_float:
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
+ float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
+ fixed_ip['id'])
+ self.assertEqual(float_addr, float_ip[0]['address'])
+
+ def test_floating_ip_update(self):
+ float_ip = self._create_floating_ip({})
+
+ values = {
+ 'project_id': 'some_pr',
+ 'host': 'some_host',
+ 'auto_assigned': True,
+ 'interface': 'some_interface',
+ 'pool': 'some_pool'
+ }
+ db.floating_ip_update(self.ctxt, float_ip['address'], values)
+ updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
+ self._assertEqualObjects(updated_float_ip, values,
+ ignored_keys=['id', 'address', 'updated_at',
+ 'deleted_at', 'created_at',
+ 'deleted', 'fixed_ip_id',
+ 'fixed_ip'])
+
+ def test_floating_ip_update_to_duplicate(self):
+ float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
+ float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})
+
+ self.assertRaises(exception.FloatingIpExists,
+ db.floating_ip_update,
+ self.ctxt, float_ip2['address'],
+ {'address': float_ip1['address']})
+
+
class InstanceDestroyConstraints(test.TestCase):
def test_destroy_with_equal_any_constraint_met(self):
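
Several of the floating ip tests above (and the removed
test_multi_associate_disassociate) pin down the same contract: associating
an already-associated floating ip returns None, and so does a second
disassociate. A toy model of that contract (not the real DB layer):

    class FloatingIp(object):
        def __init__(self):
            self.fixed = None

        def associate(self, fixed):
            if self.fixed == fixed:
                return None          # already associated: no-op
            self.fixed = fixed
            return fixed

        def disassociate(self):
            fixed, self.fixed = self.fixed, None
            return fixed             # None on a second call

    ip = FloatingIp()
    assert ip.associate('2.2.2.1') == '2.2.2.1'
    assert ip.associate('2.2.2.1') is None
    assert ip.disassociate() == '2.2.2.1'
    assert ip.disassociate() is None
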
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index 8c2f04f21..a0f71b25a 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -1285,6 +1285,36 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
self.assertFalse('availability_zone' in rows[0])
+ def _pre_upgrade_177(self, engine):
+ floating_ips = get_table(engine, 'floating_ips')
+ data = [
+ {'address': '128.128.128.128', 'deleted': 0},
+ {'address': '128.128.128.128', 'deleted': 0},
+ {'address': '128.128.128.129', 'deleted': 0},
+ ]
+
+ for item in data:
+ floating_ips.insert().values(item).execute()
+ return data
+
+ def _check_177(self, engine, data):
+ floating_ips = get_table(engine, 'floating_ips')
+
+ def get_(address, deleted):
+ deleted_value = 0 if not deleted else floating_ips.c.id
+ return floating_ips.select().\
+ where(floating_ips.c.address == address).\
+ where(floating_ips.c.deleted == deleted_value).\
+ execute().\
+ fetchall()
+
+ self.assertEqual(1, len(get_('128.128.128.128', False)))
+ self.assertEqual(1, len(get_('128.128.128.128', True)))
+ self.assertEqual(1, len(get_('128.128.128.129', False)))
+ self.assertRaises(sqlalchemy.exc.IntegrityError,
+ floating_ips.insert().execute,
+ dict(address='128.128.128.129', deleted=0))
+
class TestBaremetalMigrations(BaseMigrationTestCase, CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 9440a13b4..96954269e 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -22,6 +22,7 @@ import contextlib
from nova import context
from nova import db
+from nova import exception as nova_exception
from nova import test
from nova.compute import instance_types
@@ -214,6 +215,27 @@ class PowerVMDriverTestCase(test.TestCase):
state = self.powervm_connection.get_info(self.instance)['state']
self.assertEqual(state, power_state.RUNNING)
+ def test_spawn_create_lpar_fail(self):
+ # Verify a failure during LPAR creation raises PowerVMLPARCreationFailed.
+ # helper function
+ def raise_(ex):
+ raise ex
+
+ self.flags(powervm_img_local_path='/images/')
+ self.stubs.Set(images, 'fetch', lambda *x, **y: None)
+ self.stubs.Set(
+ self.powervm_connection._powervm,
+ 'get_host_stats',
+ lambda *x, **y: raise_(
+ (nova_exception.ProcessExecutionError('instance_name'))))
+ fake_net_info = network_model.NetworkInfo([
+ fake_network_cache_model.new_vif()])
+ self.assertRaises(exception.PowerVMLPARCreationFailed,
+ self.powervm_connection.spawn,
+ context.get_admin_context(),
+ self.instance,
+ {'id': 'ANY_ID'}, [], 's3cr3t', fake_net_info)
+
def test_spawn_cleanup_on_fail(self):
# Verify on a failed spawn, we get the original exception raised.
# helper function
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 60f7db72c..bbd5a2caa 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -19,9 +19,11 @@
"""
Test suite for VMwareAPI.
"""
-import mox
import urllib2
+import mox
+from oslo.config import cfg
+
from nova.compute import power_state
from nova.compute import task_states
from nova import context
@@ -35,6 +37,7 @@ from nova.tests.vmwareapi import db_fakes
from nova.tests.vmwareapi import stubs
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import fake as vmwareapi_fake
+from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vm_util
@@ -52,6 +55,47 @@ class fake_http_resp(object):
return "console log"
+class VMwareAPIConfTestCase(test.TestCase):
+ """Unit tests for VMWare API configurations."""
+ def setUp(self):
+ super(VMwareAPIConfTestCase, self).setUp()
+
+ def tearDown(self):
+ super(VMwareAPIConfTestCase, self).tearDown()
+
+ def test_configure_without_wsdl_loc_override(self):
+ # Test the default configuration behavior. By default,
+ # use the WSDL sitting on the host we are talking to in
+ # order to bind the SOAP client.
+ wsdl_loc = cfg.CONF.vmwareapi_wsdl_loc
+ self.assertIsNone(wsdl_loc)
+ wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com")
+ url = vim.Vim.get_soap_url("https", "www.example.com")
+ self.assertEqual("https://www.example.com/sdk/vimService.wsdl",
+ wsdl_url)
+ self.assertEqual("https://www.example.com/sdk", url)
+
+ def test_configure_with_wsdl_loc_override(self):
+ # Use the setting vmwareapi_wsdl_loc to override the
+ # default path to the WSDL.
+ #
+ # This is useful as a work-around for XML parsing issues
+ # found when using some WSDL in combination with some XML
+ # parsers.
+ #
+ # The wsdl_url should point to a different host than the one we
+ # are actually going to send commands to.
+ fake_wsdl = "https://www.test.com/sdk/foo.wsdl"
+ self.flags(vmwareapi_wsdl_loc=fake_wsdl)
+ wsdl_loc = cfg.CONF.vmwareapi_wsdl_loc
+ self.assertIsNotNone(wsdl_loc)
+ self.assertEqual(fake_wsdl, wsdl_loc)
+ wsdl_url = vim.Vim.get_wsdl_url("https", "www.example.com")
+ url = vim.Vim.get_soap_url("https", "www.example.com")
+ self.assertEqual(fake_wsdl, wsdl_url)
+ self.assertEqual("https://www.example.com/sdk", url)
+
+
class VMwareAPIVMTestCase(test.TestCase):
"""Unit tests for Vmware API connection calls."""
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 905d018de..946835224 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -202,7 +202,8 @@ class PowerVMOperator(object):
except nova_exception.ProcessExecutionError:
LOG.exception(_("LPAR instance '%s' creation failed") %
instance['name'])
- raise exception.PowerVMLPARCreationFailed()
+ raise exception.PowerVMLPARCreationFailed(
+ instance_name=instance['name'])
_create_image(context, instance, image_id)
LOG.debug(_("Activating the LPAR instance '%s'")
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index 775f77453..69480c8e1 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -37,10 +37,9 @@ ADDRESS_IN_USE_ERROR = 'Address already in use'
vmwareapi_wsdl_loc_opt = cfg.StrOpt('vmwareapi_wsdl_loc',
default=None,
- help='VIM Service WSDL Location '
+ help='Optional VIM Service WSDL Location '
'e.g http://<server>/vimService.wsdl. '
- 'Due to a bug in vSphere ESX 4.1 default wsdl. '
- 'Refer readme-vmware to setup')
+ 'Optional over-ride to default location for bug work-arounds')
CONF = cfg.CONF
CONF.register_opt(vmwareapi_wsdl_loc_opt)
@@ -86,16 +85,44 @@ class Vim:
self._protocol = protocol
self._host_name = host
+ self.wsdl_url = Vim.get_wsdl_url(protocol, host)
+ self.url = Vim.get_soap_url(protocol, host)
+ self.client = suds.client.Client(self.wsdl_url, location=self.url,
+ plugins=[VIMMessagePlugin()])
+ self._service_content = self.RetrieveServiceContent(
+ "ServiceInstance")
+
+ @staticmethod
+ def get_wsdl_url(protocol, host_name):
+ """
+ Allow override of the WSDL location. Making this static
+ means we can test the logic outside of the constructor
+ without forcing the test environment to have multiple
+ valid WSDL locations to test against.
+
+ :param protocol: https or http
+ :param host_name: localhost or other server name
+ :return: string to WSDL location for vSphere WS Management API
+ """
+ # optional WSDL location over-ride for work-arounds
wsdl_url = CONF.vmwareapi_wsdl_loc
if wsdl_url is None:
- raise Exception(_("Must specify vmwareapi_wsdl_loc"))
- # TODO(sateesh): Use this when VMware fixes their faulty wsdl
- #wsdl_url = '%s://%s/sdk/vimService.wsdl' % (self._protocol,
- # self._host_name)
- url = '%s://%s/sdk' % (self._protocol, self._host_name)
- self.client = suds.client.Client(wsdl_url, location=url,
- plugins=[VIMMessagePlugin()])
- self._service_content = self.RetrieveServiceContent("ServiceInstance")
+ # calculate default WSDL location if no override supplied
+ wsdl_url = '%s://%s/sdk/vimService.wsdl' % (protocol, host_name)
+ return wsdl_url
+
+ @staticmethod
+ def get_soap_url(protocol, host_name):
+ """
+ Calculates the location of the SOAP services
+ for a particular server. Created as a static
+ method for testing.
+
+ :param protocol: https or http
+ :param host_name: localhost or other vSphere server name
+ :return: the url to the active vSphere WS Management API
+ """
+ return '%s://%s/sdk' % (protocol, host_name)
def get_service_content(self):
"""Gets the service content object."""