summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--doc/api_samples/os-fixed-ips/fixedip-post-req.json3
-rw-r--r--doc/api_samples/os-fixed-ips/fixedip-post-req.xml2
-rw-r--r--doc/api_samples/os-fixed-ips/fixedips-get-resp.json8
-rw-r--r--doc/api_samples/os-fixed-ips/fixedips-get-resp.xml7
-rw-r--r--doc/source/man/nova-cert.rst2
-rw-r--r--etc/nova/nova.conf.sample9
-rw-r--r--etc/nova/rootwrap.d/baremetal_compute_ipmi.filters9
-rw-r--r--nova/api/openstack/compute/contrib/cloudpipe.py4
-rw-r--r--nova/api/openstack/compute/contrib/volumes.py9
-rw-r--r--nova/cert/manager.py5
-rw-r--r--nova/cert/rpcapi.py5
-rw-r--r--nova/compute/manager.py11
-rw-r--r--nova/console/api.py5
-rw-r--r--nova/console/manager.py5
-rw-r--r--nova/console/rpcapi.py5
-rw-r--r--nova/consoleauth/manager.py5
-rw-r--r--nova/consoleauth/rpcapi.py5
-rw-r--r--nova/db/sqlalchemy/api.py46
-rw-r--r--nova/db/sqlalchemy/models.py2
-rw-r--r--nova/network/manager.py18
-rw-r--r--nova/scheduler/filter_scheduler.py21
-rw-r--r--nova/scheduler/manager.py5
-rw-r--r--nova/scheduler/rpcapi.py5
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cloudpipe.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_volumes.py21
-rw-r--r--nova/tests/api/openstack/compute/test_server_actions.py12
-rw-r--r--nova/tests/baremetal/db/base.py9
-rw-r--r--nova/tests/baremetal/test_driver.py276
-rw-r--r--nova/tests/baremetal/test_ipmi.py228
-rw-r--r--nova/tests/baremetal/test_volume_driver.py2
-rw-r--r--nova/tests/cert/test_rpcapi.py10
-rw-r--r--nova/tests/compute/test_compute.py25
-rw-r--r--nova/tests/console/test_console.py10
-rw-r--r--nova/tests/console/test_rpcapi.py44
-rw-r--r--nova/tests/consoleauth/test_consoleauth.py5
-rw-r--r--nova/tests/consoleauth/test_rpcapi.py11
-rw-r--r--nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl7
-rw-r--r--nova/tests/integrated/test_api_samples.py94
-rw-r--r--nova/tests/network/test_manager.py8
-rw-r--r--nova/tests/scheduler/test_rpcapi.py7
-rw-r--r--nova/tests/test_cinder.py18
-rw-r--r--nova/tests/test_db_api.py21
-rw-r--r--nova/tests/test_migrations.py4
-rw-r--r--nova/tests/virt/disk/test_api.py63
-rw-r--r--nova/tests/virt/disk/test_nbd.py24
-rw-r--r--nova/virt/baremetal/base.py2
-rw-r--r--nova/virt/baremetal/db/api.py7
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/session.py11
-rw-r--r--nova/virt/baremetal/driver.py134
-rw-r--r--nova/virt/baremetal/fake.py37
-rw-r--r--nova/virt/baremetal/ipmi.py257
-rw-r--r--nova/virt/baremetal/volume_driver.py16
-rw-r--r--nova/virt/disk/api.py19
-rw-r--r--nova/virt/disk/vfs/localfs.py3
-rw-r--r--nova/volume/cinder.py4
58 files changed, 1300 insertions, 302 deletions
diff --git a/doc/api_samples/os-fixed-ips/fixedip-post-req.json b/doc/api_samples/os-fixed-ips/fixedip-post-req.json
new file mode 100644
index 000000000..cf8ba0e0b
--- /dev/null
+++ b/doc/api_samples/os-fixed-ips/fixedip-post-req.json
@@ -0,0 +1,3 @@
+{
+ "reserve": "None"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-fixed-ips/fixedip-post-req.xml b/doc/api_samples/os-fixed-ips/fixedip-post-req.xml
new file mode 100644
index 000000000..e29b685be
--- /dev/null
+++ b/doc/api_samples/os-fixed-ips/fixedip-post-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<reserve>None</reserve> \ No newline at end of file
diff --git a/doc/api_samples/os-fixed-ips/fixedips-get-resp.json b/doc/api_samples/os-fixed-ips/fixedips-get-resp.json
new file mode 100644
index 000000000..d63c91559
--- /dev/null
+++ b/doc/api_samples/os-fixed-ips/fixedips-get-resp.json
@@ -0,0 +1,8 @@
+{
+ "fixed_ip": {
+ "address": "192.168.1.1",
+ "cidr": "192.168.1.0/24",
+ "host": "host",
+ "hostname": "openstack"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-fixed-ips/fixedips-get-resp.xml b/doc/api_samples/os-fixed-ips/fixedips-get-resp.xml
new file mode 100644
index 000000000..a9676721f
--- /dev/null
+++ b/doc/api_samples/os-fixed-ips/fixedips-get-resp.xml
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<fixed_ip>
+ <cidr>192.168.1.0/24</cidr>
+ <hostname>openstack</hostname>
+ <host>host</host>
+ <address>192.168.1.1</address>
+</fixed_ip> \ No newline at end of file
diff --git a/doc/source/man/nova-cert.rst b/doc/source/man/nova-cert.rst
index ea176a4cd..f8c6d0c2d 100644
--- a/doc/source/man/nova-cert.rst
+++ b/doc/source/man/nova-cert.rst
@@ -21,7 +21,7 @@ SYNOPSIS
DESCRIPTION
===========
-nova-cert is a server daemon that serves the Nova Cert service for X509 certificates.
+nova-cert is a server daemon that serves the Nova Cert service for X509 certificates. Used to generate certificates for euca-bundle-image. Only needed for EC2 API.
OPTIONS
=======
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 2fdd612b4..c3c10239c 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -920,6 +920,15 @@
# l3_lib=nova.network.l3.LinuxNetL3
#### (StrOpt) Indicates underlying L3 management library
+# update_dns_entries=false
+#### (BoolOpt) If True, when a DNS entry must be updated, it sends a fanout
+#### cast to all network hosts to update their DNS entries in multi
+#### host mode
+
+# dns_update_periodic_interval=-1
+#### (IntOpt) Number of periodic scheduler ticks to wait between runs of
+#### updates to DNS entries
+
######## defined in nova.network.quantumv2.api ########
diff --git a/etc/nova/rootwrap.d/baremetal_compute_ipmi.filters b/etc/nova/rootwrap.d/baremetal_compute_ipmi.filters
new file mode 100644
index 000000000..a2858cd11
--- /dev/null
+++ b/etc/nova/rootwrap.d/baremetal_compute_ipmi.filters
@@ -0,0 +1,9 @@
+# nova-rootwrap command filters for compute nodes
+# This file should be owned by (and only-writeable by) the root user
+
+[Filters]
+# nova/virt/baremetal/ipmi.py: 'ipmitool', ..
+ipmitool: CommandFilter, /usr/bin/ipmitool, root
+
+# nova/virt/baremetal/ipmi.py: 'kill', '-TERM', str(console_pid)
+kill_shellinaboxd: KillFilter, root, /usr/local/bin/shellinaboxd, -15, -TERM
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index 1b60f6c1f..bb3007735 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -74,7 +74,9 @@ class CloudpipeController(object):
def _get_all_cloudpipes(self, context):
"""Get all cloudpipes"""
- return [instance for instance in self.compute_api.get_all(context)
+ instances = self.compute_api.get_all(context,
+ search_opts={'deleted': False})
+ return [instance for instance in instances
if instance['image_ref'] == str(CONF.vpn_image_id)
and instance['vm_state'] != vm_states.DELETED]
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index abdef3a7d..9564921f4 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -26,6 +26,7 @@ from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
from nova import utils
from nova import volume
@@ -365,6 +366,12 @@ class VolumeAttachmentController(wsgi.Controller):
instance['uuid'],
assigned_mountpoint)}
+ def _validate_volume_id(self, volume_id):
+ if not uuidutils.is_uuid_like(volume_id):
+ msg = _("Bad volumeId format: volumeId is "
+ "not in proper format (%s)") % volume_id
+ raise exc.HTTPBadRequest(explanation=msg)
+
@wsgi.serializers(xml=VolumeAttachmentTemplate)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
@@ -377,6 +384,8 @@ class VolumeAttachmentController(wsgi.Controller):
volume_id = body['volumeAttachment']['volumeId']
device = body['volumeAttachment'].get('device')
+ self._validate_volume_id(volume_id)
+
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
diff --git a/nova/cert/manager.py b/nova/cert/manager.py
index 2d17a675a..3a00c47a6 100644
--- a/nova/cert/manager.py
+++ b/nova/cert/manager.py
@@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__)
class CertManager(manager.Manager):
- RPC_API_VERSION = '1.0'
+ RPC_API_VERSION = '1.1'
def init_host(self):
crypto.ensure_ca_filesystem()
@@ -66,3 +66,6 @@ class CertManager(manager.Manager):
def decrypt_text(self, context, project_id, text):
"""Decrypt base64 encoded text using the projects private key."""
return crypto.decrypt_text(project_id, base64.b64decode(text))
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
diff --git a/nova/cert/rpcapi.py b/nova/cert/rpcapi.py
index 35d02b643..79b136571 100644
--- a/nova/cert/rpcapi.py
+++ b/nova/cert/rpcapi.py
@@ -31,6 +31,7 @@ class CertAPI(nova.openstack.common.rpc.proxy.RpcProxy):
API version history:
1.0 - Initial version.
+ 1.1 - Added get_backdoor_port()
'''
#
@@ -78,3 +79,7 @@ class CertAPI(nova.openstack.common.rpc.proxy.RpcProxy):
return self.call(ctxt, self.make_msg('decrypt_text',
project_id=project_id,
text=text))
+
+ def get_backdoor_port(self, context, host):
+ return self.call(context, self.make_msg('get_backdoor_port'),
+ version='1.1')
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index ccd82d3f0..9e4ac301e 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -674,7 +674,7 @@ class ComputeManager(manager.SchedulerDependentManager):
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance['uuid'],
self.scheduler_rpcapi.run_instance, method_args,
- task_state)
+ task_state, exc_info)
except Exception:
rescheduled = False
@@ -689,7 +689,8 @@ class ComputeManager(manager.SchedulerDependentManager):
raise exc_info[0], exc_info[1], exc_info[2]
def _reschedule(self, context, request_spec, filter_properties,
- instance_uuid, scheduler_method, method_args, task_state):
+ instance_uuid, scheduler_method, method_args, task_state,
+ exc_info=None):
"""Attempt to re-schedule a compute operation."""
retry = filter_properties.get('retry', None)
@@ -713,6 +714,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# reset the task state:
self._instance_update(context, instance_uuid, task_state=task_state)
+ if exc_info:
+ # stringify to avoid circular ref problem in json serialization:
+ retry['exc'] = traceback.format_exception(*exc_info)
+
scheduler_method(context, *method_args)
return True
@@ -1870,7 +1875,7 @@ class ComputeManager(manager.SchedulerDependentManager):
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance_uuid, scheduler_method,
- method_args, task_state)
+ method_args, task_state, exc_info)
except Exception:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
diff --git a/nova/console/api.py b/nova/console/api.py
index 7eb7a31e2..cad1999be 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -68,3 +68,8 @@ class API(base.Base):
else:
instance = self.db.instance_get(context, instance_uuid)
return instance
+
+ def get_backdoor_port(self, context, host):
+ topic = self._get_console_topic(context, host)
+ rpcapi = console_rpcapi.ConsoleAPI(topic=topic)
+ return rpcapi.get_backdoor_port(context, host)
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 6bae3b45c..243c028d9 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -52,7 +52,7 @@ class ConsoleProxyManager(manager.Manager):
"""
- RPC_API_VERSION = '1.0'
+ RPC_API_VERSION = '1.1'
def __init__(self, console_driver=None, *args, **kwargs):
if not console_driver:
@@ -132,3 +132,6 @@ class ConsoleProxyManager(manager.Manager):
pool_info['compute_host'] = instance_host
pool = self.db.console_pool_create(context, pool_info)
return pool
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
diff --git a/nova/console/rpcapi.py b/nova/console/rpcapi.py
index 8c31bb97f..15a3b46ec 100644
--- a/nova/console/rpcapi.py
+++ b/nova/console/rpcapi.py
@@ -31,6 +31,7 @@ class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
API version history:
1.0 - Initial version.
+ 1.1 - Added get_backdoor_port()
'''
#
@@ -54,3 +55,7 @@ class ConsoleAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def remove_console(self, ctxt, console_id):
self.cast(ctxt, self.make_msg('remove_console', console_id=console_id))
+
+ def get_backdoor_port(self, ctxt, host):
+ return self.call(ctxt, self.make_msg('get_backdoor_port'),
+ version='1.1')
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index 23d0a06f6..8d2171de7 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -45,7 +45,7 @@ CONF.import_opt('memcached_servers', 'nova.config')
class ConsoleAuthManager(manager.Manager):
"""Manages token based authentication."""
- RPC_API_VERSION = '1.0'
+ RPC_API_VERSION = '1.1'
def __init__(self, scheduler_driver=None, *args, **kwargs):
super(ConsoleAuthManager, self).__init__(*args, **kwargs)
@@ -75,3 +75,6 @@ class ConsoleAuthManager(manager.Manager):
LOG.audit(_("Checking Token: %(token)s, %(token_valid)s)"), locals())
if token_valid:
return jsonutils.loads(token_str)
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
diff --git a/nova/consoleauth/rpcapi.py b/nova/consoleauth/rpcapi.py
index f4b4390ac..64b915ec3 100644
--- a/nova/consoleauth/rpcapi.py
+++ b/nova/consoleauth/rpcapi.py
@@ -30,6 +30,7 @@ class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
API version history:
1.0 - Initial version.
+ 1.1 - Added get_backdoor_port()
'''
#
@@ -59,3 +60,7 @@ class ConsoleAuthAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def check_token(self, ctxt, token):
return self.call(ctxt, self.make_msg('check_token', token=token))
+
+ def get_backdoor_port(self, ctxt, host):
+ return self.call(ctxt, self.make_msg('get_backdoor_port'),
+ version='1.1')
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 9cc3b64a1..095f76126 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1768,38 +1768,40 @@ def instance_get_all_hung_in_rebooting(context, reboot_window):
@require_context
-def instance_test_and_set(context, instance_uuid, attr, ok_states,
- new_state, session=None):
+def instance_test_and_set(context, instance_uuid, attr, ok_states, new_state):
"""Atomically check if an instance is in a valid state, and if it is, set
the instance into a new state.
"""
- if not session:
- session = get_session()
+ if not uuidutils.is_uuid_like(instance_uuid):
+ raise exception.InvalidUUID(instance_uuid)
+ session = get_session()
with session.begin():
query = model_query(context, models.Instance, session=session,
- project_only=True)
-
- if uuidutils.is_uuid_like(instance_uuid):
- query = query.filter_by(uuid=instance_uuid)
+ project_only=True).\
+ filter_by(uuid=instance_uuid)
+
+ attr_column = getattr(models.Instance, attr)
+ filter_op = None
+ # NOTE(boris-42): `SELECT IN` doesn't work with None values because
+ # they are incomparable.
+ if None in ok_states:
+ filter_op = or_(attr_column == None,
+ attr_column.in_(filter(lambda x: x is not None,
+ ok_states)))
else:
- raise exception.InvalidUUID(instance_uuid)
-
- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
- # then this has concurrency issues
- instance = query.with_lockmode('update').first()
+ filter_op = attr_column.in_(ok_states)
- state = instance[attr]
- if state not in ok_states:
+ count = query.filter(filter_op).\
+ update({attr: new_state}, synchronize_session=False)
+ if count == 0:
+ instance_ref = query.first()
raise exception.InstanceInvalidState(
attr=attr,
- instance_uuid=instance['uuid'],
- state=state,
+ instance_uuid=instance_ref['uuid'],
+ state=instance_ref[attr],
method='instance_test_and_set')
- instance[attr] = new_state
- instance.save(session=session)
-
@require_context
def instance_update(context, instance_uuid, values):
@@ -2109,8 +2111,8 @@ def network_delete_safe(context, network_id):
filter_by(network_id=network_id).\
filter_by(deleted=False).\
filter_by(allocated=True).\
- all()
- if result:
+ count()
+ if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = network_get(context, network_id=network_id,
session=session)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index a382d8d55..c7e956a40 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -981,7 +981,7 @@ class InstanceFault(BASE, NovaBase):
class InstanceIdMapping(BASE, NovaBase):
- """Compatability layer for the EC2 instance service"""
+ """Compatibility layer for the EC2 instance service"""
__tablename__ = 'instance_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 97d4fa10d..0e8530d14 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -170,6 +170,10 @@ network_opts = [
help='If True, when a DNS entry must be updated, it sends a '
'fanout cast to all network hosts to update their DNS '
'entries in multi host mode'),
+ cfg.IntOpt("dns_update_periodic_interval",
+ default=-1,
+ help='Number of periodic scheduler ticks to wait between '
+ 'runs of updates to DNS entries.'),
cfg.StrOpt('dhcp_domain',
default='novalocal',
help='domain to use for building the hostnames'),
@@ -543,6 +547,9 @@ class FloatingIP(object):
# find previously associated instance
fixed_ip = self.db.fixed_ip_get(context,
floating_ip['fixed_ip_id'])
+ if fixed_ip['address'] == fixed_address:
+ # NOTE(vish): already associated to this address
+ return
orig_instance_uuid = fixed_ip['instance_uuid']
self.disassociate_floating_ip(context, floating_address)
@@ -1912,7 +1919,7 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def get_network(self, context, network_uuid):
network = self.db.network_get_by_uuid(context.elevated(), network_uuid)
- return dict(network.iteritems())
+ return jsonutils.to_primitive(network)
@wrap_check_policy
def get_all_networks(self, context):
@@ -1943,6 +1950,15 @@ class NetworkManager(manager.SchedulerDependentManager):
return self.db.virtual_interface_get_by_address(context,
mac_address)
+ @manager.periodic_task(
+ ticks_between_runs=CONF.dns_update_periodic_interval)
+ def _periodic_update_dns(self, context):
+ """Update local DNS entries of all networks on this host"""
+ networks = self.db.network_get_all_by_host(context, self.host)
+ for network in networks:
+ dev = self.driver.get_dev(network)
+ self.driver.update_dns(context, dev, network)
+
def update_dns(self, context, network_ids):
"""Called when fixed IP is allocated or deallocated"""
if CONF.fake_network:
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index c18daa4cc..ea9a39b6f 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -191,6 +191,23 @@ class FilterScheduler(driver.Scheduler):
"'scheduler_max_attempts', must be >= 1"))
return max_attempts
+ def _log_compute_error(self, instance_uuid, retry):
+ """If the request contained an exception from a previous compute
+ build/resize operation, log it to aid debugging
+ """
+ exc = retry.pop('exc', None) # string-ified exception from compute
+ if not exc:
+ return # no exception info from a previous attempt, skip
+
+ hosts = retry.get('hosts', None)
+ if not hosts:
+ return # no previously attempted hosts, skip
+
+ last_host, last_node = hosts[-1]
+ msg = _("Error from last host: %(last_host)s (node %(last_node)s): "
+ "%(exc)s") % locals()
+ LOG.error(msg, instance_uuid=instance_uuid)
+
def _populate_retry(self, filter_properties, instance_properties):
"""Populate filter properties with history of retries for this
request. If maximum retries is exceeded, raise NoValidHost.
@@ -212,8 +229,10 @@ class FilterScheduler(driver.Scheduler):
}
filter_properties['retry'] = retry
+ instance_uuid = instance_properties.get('uuid')
+ self._log_compute_error(instance_uuid, retry)
+
if retry['num_attempts'] > max_attempts:
- instance_uuid = instance_properties.get('uuid')
msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
"instance %(instance_uuid)s") % locals()
raise exception.NoValidHost(reason=msg)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 535eb7797..f3eb6e2e8 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -54,7 +54,7 @@ QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
- RPC_API_VERSION = '2.4'
+ RPC_API_VERSION = '2.5'
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
@@ -263,3 +263,6 @@ class SchedulerManager(manager.Manager):
@manager.periodic_task
def _expire_reservations(self, context):
QUOTAS.expire(context)
+
+ def get_backdoor_port(self, context):
+ return self.backdoor_port
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index 4bc6e0e45..6ae4adcae 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -49,6 +49,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.3 - Remove create_volume()
2.4 - Change update_service_capabilities()
- accepts a list of capabilities
+ 2.5 - Add get_backdoor_port()
'''
#
@@ -106,3 +107,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
service_name=service_name, host=host,
capabilities=capabilities),
version='2.4')
+
+ def get_backdoor_port(self, context, host):
+ return self.call(context, self.make_msg('get_backdoor_port'),
+ version='2.5')
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
index 5f92d521e..1ff26a60d 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
@@ -39,11 +39,11 @@ def fake_vpn_instance():
}
-def compute_api_get_all_empty(context):
+def compute_api_get_all_empty(context, search_opts=None):
return []
-def compute_api_get_all(context):
+def compute_api_get_all(context, search_opts=None):
return [fake_vpn_instance()]
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
index e8a315edd..3119f55e8 100644
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -289,6 +289,27 @@ class VolumeAttachTests(test.TestCase):
self.assertEqual(result['volumeAttachment']['id'],
'00000000-aaaa-aaaa-aaaa-000000000000')
+ def test_attach_volume_bad_id(self):
+ self.stubs.Set(compute_api.API,
+ 'attach_volume',
+ fake_attach_volume)
+ attachments = volumes.VolumeAttachmentController()
+
+ body = {
+ 'volumeAttachment': {
+ 'device': None,
+ 'volumeId': 'TESTVOLUME',
+ }
+ }
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-volumes/attach')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = jsonutils.dumps(body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest, attachments.create,
+ req, FAKE_UUID, body)
+
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 423750470..157ac0e8d 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -193,6 +193,18 @@ class ServerActionsControllerTest(test.TestCase):
self.controller._action_reboot,
req, FAKE_UUID, body)
+ def test_reboot_raises_unprocessable_entity(self):
+ body = dict(reboot=dict(type="HARD"))
+
+ def fake_reboot(*args, **kwargs):
+ raise NotImplementedError()
+
+ self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
diff --git a/nova/tests/baremetal/db/base.py b/nova/tests/baremetal/db/base.py
index f2b905895..37e51fe79 100644
--- a/nova/tests/baremetal/db/base.py
+++ b/nova/tests/baremetal/db/base.py
@@ -24,8 +24,9 @@ from nova.virt.baremetal.db.sqlalchemy import session as bm_session
_DB_CACHE = None
CONF = cfg.CONF
-CONF.import_opt('baremetal_sql_connection',
- 'nova.virt.baremetal.db.sqlalchemy.session')
+CONF.import_opt('sql_connection',
+ 'nova.virt.baremetal.db.sqlalchemy.session',
+ group='baremetal')
class Database(test.Database):
@@ -38,11 +39,11 @@ class BMDBTestCase(test.TestCase):
def setUp(self):
super(BMDBTestCase, self).setUp()
- self.flags(baremetal_sql_connection='sqlite://')
+ self.flags(sql_connection='sqlite://', group='baremetal')
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(bm_session, bm_migration,
- sql_connection=CONF.baremetal_sql_connection,
+ sql_connection=CONF.baremetal.sql_connection,
sqlite_db=None,
sqlite_clean_db=None)
self.useFixture(_DB_CACHE)
diff --git a/nova/tests/baremetal/test_driver.py b/nova/tests/baremetal/test_driver.py
index c62c01aac..d060d3a5d 100644
--- a/nova/tests/baremetal/test_driver.py
+++ b/nova/tests/baremetal/test_driver.py
@@ -1,3 +1,7 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
@@ -15,175 +19,161 @@
# under the License.
"""
-Tests for baremetal driver.
+Tests for the base baremetal driver class.
"""
-from nova import exception
+import mox
+
from nova.openstack.common import cfg
-from nova.tests.baremetal.db import base
-from nova.tests.baremetal.db import utils
+
+from nova import exception
+from nova import test
+
+from nova.tests.baremetal.db import base as bm_db_base
+from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
-from nova.tests import test_virt_drivers
-from nova.tests import utils as test_utils
+from nova.tests import utils
+
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import driver as bm_driver
-from nova.virt.baremetal import volume_driver
-from nova.virt.firewall import NoopFirewallDriver
-
+from nova.virt.baremetal import fake
+from nova.virt import firewall
CONF = cfg.CONF
-
-class FakeVifDriver(object):
-
- def plug(self, instance, vif):
- pass
-
- def unplug(self, instance, vif):
- pass
-
-FakeFirewallDriver = NoopFirewallDriver
-
-
-class FakeVolumeDriver(volume_driver.VolumeDriver):
- def __init__(self, virtapi):
- super(FakeVolumeDriver, self).__init__(virtapi)
- self._initiator = "testtesttest"
-
-
-NODE = utils.new_bm_node(cpus=2, memory_mb=4096, service_host="host1")
-NICS = [
- {'address': '01:23:45:67:89:01', 'datapath_id': '0x1', 'port_no': 1, },
- {'address': '01:23:45:67:89:02', 'datapath_id': '0x2', 'port_no': 2, },
- ]
-
-
-def class_path(class_):
- return class_.__module__ + '.' + class_.__name__
-
-
COMMON_FLAGS = dict(
- baremetal_sql_connection='sqlite:///:memory:',
- baremetal_driver='nova.virt.baremetal.fake.Fake',
- power_manager='nova.virt.baremetal.fake.FakePowerManager',
- baremetal_vif_driver=class_path(FakeVifDriver),
- firewall_driver=class_path(FakeFirewallDriver),
- baremetal_volume_driver=class_path(FakeVolumeDriver),
- instance_type_extra_specs=['cpu_arch:test'],
- host=NODE['service_host'],
+ firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
+ host='test_host',
)
-
-def _create_baremetal_stuff():
- context = test_utils.get_test_admin_context()
- node = db.bm_node_create(context, NODE)
- for nic in NICS:
- db.bm_interface_create(context,
- node['id'],
- nic['address'],
- nic['datapath_id'],
- nic['port_no'])
- return node
+BAREMETAL_FLAGS = dict(
+ driver='nova.virt.baremetal.fake.FakeDriver',
+ instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
+ power_manager='nova.virt.baremetal.fake.FakePowerManager',
+ vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
+ volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
+ group='baremetal',
+)
-class BaremetalDriverSpawnTestCase(base.Database):
+class BareMetalDriverNoDBTestCase(test.TestCase):
def setUp(self):
- super(BaremetalDriverSpawnTestCase, self).setUp()
+ super(BareMetalDriverNoDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
- fake_image.stub_out_image_service(self.stubs)
+ self.flags(**BAREMETAL_FLAGS)
+ self.driver = bm_driver.BareMetalDriver(None)
- self.node = _create_baremetal_stuff()
- self.node_id = self.node['id']
+ def test_validate_driver_loading(self):
+ self.assertTrue(isinstance(self.driver.driver,
+ fake.FakeDriver))
+ self.assertTrue(isinstance(self.driver.vif_driver,
+ fake.FakeVifDriver))
+ self.assertTrue(isinstance(self.driver.volume_driver,
+ fake.FakeVolumeDriver))
+ self.assertTrue(isinstance(self.driver.firewall_driver,
+ fake.FakeFirewallDriver))
- self.context = test_utils.get_test_admin_context()
- self.instance = test_utils.get_test_instance()
- self.network_info = test_utils.get_test_network_info()
- self.block_device_info = None
- self.image_meta = test_utils.get_test_image_info(None, self.instance)
- self.driver = bm_driver.BareMetalDriver(None)
- self.kwargs = dict(
- context=self.context,
- instance=self.instance,
- image_meta=self.image_meta,
- injected_files=[('/foo', 'bar'), ('/abc', 'xyz')],
- admin_password='testpass',
- network_info=self.network_info,
- block_device_info=self.block_device_info)
- self.addCleanup(fake_image.FakeImageService_reset)
- def test_ok(self):
- self.instance['node'] = str(self.node_id)
- self.driver.spawn(**self.kwargs)
- node = db.bm_node_get(self.context, self.node_id)
- self.assertEqual(node['instance_uuid'], self.instance['uuid'])
- self.assertEqual(node['task_state'], baremetal_states.ACTIVE)
-
- def test_without_node(self):
- self.assertRaises(
- exception.NovaException,
- self.driver.spawn,
- **self.kwargs)
-
- def test_node_not_found(self):
- self.instance['node'] = "123456789"
- self.assertRaises(
- exception.InstanceNotFound,
- self.driver.spawn,
- **self.kwargs)
-
- def test_node_in_use(self):
- self.instance['node'] = str(self.node_id)
- db.bm_node_update(self.context, self.node_id,
- {'instance_uuid': 'something'})
- self.assertRaises(
- exception.NovaException,
- self.driver.spawn,
- **self.kwargs)
-
-
-class BaremetalDriverTestCase(test_virt_drivers._VirtDriverTestCase,
- base.Database):
+class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
- super(BaremetalDriverTestCase, self).setUp()
- self.driver_module = 'nova.virt.baremetal.BareMetalDriver'
+ super(BareMetalDriverWithDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
- self.node = _create_baremetal_stuff()
- self.node_id = self.node['id']
+ self.flags(**BAREMETAL_FLAGS)
+
fake_image.stub_out_image_service(self.stubs)
+ self.context = utils.get_test_admin_context()
+ self.driver = bm_driver.BareMetalDriver(None)
+ self.node_info = bm_db_utils.new_bm_node(
+ id=123,
+ service_host='test_host',
+ cpus=2,
+ memory_mb=2048,
+ )
+ self.nic_info = [
+ {'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
+ 'port_no': 1},
+ {'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
+ 'port_no': 2},
+ ]
self.addCleanup(fake_image.FakeImageService_reset)
- def _get_running_instance(self):
- instance_ref = test_utils.get_test_instance()
- instance_ref['node'] = str(self.node_id)
- network_info = test_utils.get_test_network_info()
- image_info = test_utils.get_test_image_info(None, instance_ref)
- self.connection.spawn(self.ctxt, instance_ref, image_info,
- [], 'herp', network_info=network_info)
- return instance_ref, network_info
-
- def test_loading_baremetal_drivers(self):
- from nova.virt.baremetal import fake
- drv = bm_driver.BareMetalDriver(None)
- self.assertTrue(isinstance(drv.baremetal_nodes, fake.Fake))
- self.assertTrue(isinstance(drv._vif_driver, FakeVifDriver))
- self.assertTrue(isinstance(drv._firewall_driver, FakeFirewallDriver))
- self.assertTrue(isinstance(drv._volume_driver, FakeVolumeDriver))
+ def _create_node(self):
+ self.node = db.bm_node_create(self.context, self.node_info)
+ for nic in self.nic_info:
+ db.bm_interface_create(
+ self.context,
+ self.node['id'],
+ nic['address'],
+ nic['datapath_id'],
+ nic['port_no'],
+ )
+ self.test_instance = utils.get_test_instance()
+ self.test_instance['node'] = self.node['id']
+ self.spawn_params = dict(
+ admin_password='test_pass',
+ block_device_info=None,
+ context=self.context,
+ image_meta=utils.get_test_image_info(None,
+ self.test_instance),
+ injected_files=[('/fake/path', 'hello world')],
+ instance=self.test_instance,
+ network_info=utils.get_test_network_info(),
+ )
def test_get_host_stats(self):
- self.flags(instance_type_extra_specs=['cpu_arch:x86_64',
- 'x:123',
- 'y:456', ])
- drv = bm_driver.BareMetalDriver(None)
- cap_list = drv.get_host_stats()
- self.assertTrue(isinstance(cap_list, list))
- self.assertEqual(len(cap_list), 1)
- cap = cap_list[0]
- self.assertEqual(cap['cpu_arch'], 'x86_64')
- self.assertEqual(cap['x'], '123')
- self.assertEqual(cap['y'], '456')
- self.assertEqual(cap['hypervisor_type'], 'baremetal')
- self.assertEqual(cap['baremetal_driver'],
- 'nova.virt.baremetal.fake.Fake')
+ self._create_node()
+ stats = self.driver.get_host_stats()
+ self.assertTrue(isinstance(stats, list))
+ self.assertEqual(len(stats), 1)
+ stats = stats[0]
+ self.assertEqual(stats['cpu_arch'], 'test')
+ self.assertEqual(stats['test_spec'], 'test_value')
+ self.assertEqual(stats['hypervisor_type'], 'baremetal')
+ self.assertEqual(stats['hypervisor_hostname'], '123')
+ self.assertEqual(stats['host'], 'test_host')
+ self.assertEqual(stats['vcpus'], 2)
+ self.assertEqual(stats['host_memory_total'], 2048)
+
+ def test_spawn_ok(self):
+ self._create_node()
+ self.driver.spawn(**self.spawn_params)
+ row = db.bm_node_get(self.context, self.node['id'])
+ self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
+
+ def test_spawn_node_in_use(self):
+ self._create_node()
+ db.bm_node_update(self.context, self.node['id'],
+ {'instance_uuid': '1234-5678'})
+
+ self.assertRaises(exception.NovaException,
+ self.driver.spawn, **self.spawn_params)
+
+ row = db.bm_node_get(self.context, self.node['id'])
+ self.assertEqual(row['task_state'], None)
+
+ def test_spawn_node_not_found(self):
+ self._create_node()
+ db.bm_node_update(self.context, self.node['id'],
+ {'id': 9876})
+
+ self.assertRaises(exception.InstanceNotFound,
+ self.driver.spawn, **self.spawn_params)
+
+ row = db.bm_node_get(self.context, 9876)
+ self.assertEqual(row['task_state'], None)
+
+ def test_spawn_fails(self):
+ self._create_node()
+
+ self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
+ fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.driver.spawn, **self.spawn_params)
+
+ row = db.bm_node_get(self.context, self.node['id'])
+ self.assertEqual(row['task_state'], baremetal_states.ERROR)
diff --git a/nova/tests/baremetal/test_ipmi.py b/nova/tests/baremetal/test_ipmi.py
new file mode 100644
index 000000000..40ec43abd
--- /dev/null
+++ b/nova/tests/baremetal/test_ipmi.py
@@ -0,0 +1,228 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test class for baremetal IPMI power manager.
+"""
+
+import os
+import stat
+import tempfile
+import time
+
+import mox
+
+from nova.openstack.common import cfg
+from nova import test
+from nova.tests.baremetal.db import utils as bm_db_utils
+from nova import utils
+from nova.virt.baremetal import baremetal_states
+from nova.virt.baremetal import ipmi
+from nova.virt.baremetal import utils as bm_utils
+from nova.virt.libvirt import utils as libvirt_utils
+
+CONF = cfg.CONF
+
+
+class BareMetalIPMITestCase(test.TestCase):
+
+ def setUp(self):
+ super(BareMetalIPMITestCase, self).setUp()
+ self.node = bm_db_utils.new_bm_node(
+ id=123,
+ pm_address='fake-address',
+ pm_user='fake-user',
+ pm_password='fake-password')
+ self.ipmi = ipmi.IPMI(self.node)
+
+ def test_construct(self):
+ self.assertEqual(self.ipmi.node_id, 123)
+ self.assertEqual(self.ipmi.address, 'fake-address')
+ self.assertEqual(self.ipmi.user, 'fake-user')
+ self.assertEqual(self.ipmi.password, 'fake-password')
+
+ def test_make_password_file(self):
+ pw_file = ipmi._make_password_file(self.node['pm_password'])
+ try:
+ self.assertTrue(os.path.isfile(pw_file))
+ self.assertEqual(os.stat(pw_file)[stat.ST_MODE] & 0777, 0600)
+ with open(pw_file, "r") as f:
+ pm_password = f.read()
+ self.assertEqual(pm_password, self.node['pm_password'])
+ finally:
+ os.unlink(pw_file)
+
+ def test_exec_ipmitool(self):
+ pw_file = '/tmp/password_file'
+
+ self.mox.StubOutWithMock(ipmi, '_make_password_file')
+ self.mox.StubOutWithMock(utils, 'execute')
+ self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
+ ipmi._make_password_file(self.ipmi.password).AndReturn(pw_file)
+ args = [
+ 'ipmitool',
+ '-I', 'lanplus',
+ '-H', self.ipmi.address,
+ '-U', self.ipmi.user,
+ '-f', pw_file,
+ 'A', 'B', 'C',
+ ]
+ utils.execute(*args, attempts=3).AndReturn(('', ''))
+ bm_utils.unlink_without_raise(pw_file).AndReturn(None)
+ self.mox.ReplayAll()
+
+ self.ipmi._exec_ipmitool('A B C')
+ self.mox.VerifyAll()
+
+ def test_is_power(self):
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is on\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi._is_power("on")
+ self.mox.VerifyAll()
+
+ def test_power_already_on(self):
+ self.flags(ipmi_power_retry=0, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is on\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.DELETED
+ self.ipmi._power_on()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.ACTIVE)
+
+ def test_power_on_ok(self):
+ self.flags(ipmi_power_retry=0, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is on\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.DELETED
+ self.ipmi._power_on()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.ACTIVE)
+
+ def test_power_on_fail(self):
+ self.flags(ipmi_power_retry=0, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.DELETED
+ self.ipmi._power_on()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.ERROR)
+
+ def test_power_on_max_retries(self):
+ self.flags(ipmi_power_retry=2, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.ipmi._exec_ipmitool("power on").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.DELETED
+ self.ipmi._power_on()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.ERROR)
+ self.assertEqual(self.ipmi.retries, 3)
+
+ def test_power_off_ok(self):
+ self.flags(ipmi_power_retry=0, group='baremetal')
+ self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
+
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is on\n"])
+ self.ipmi._exec_ipmitool("power off").AndReturn([])
+ self.ipmi._exec_ipmitool("power status").AndReturn(
+ ["Chassis Power is off\n"])
+ self.mox.ReplayAll()
+
+ self.ipmi.state = baremetal_states.ACTIVE
+ self.ipmi._power_off()
+ self.mox.VerifyAll()
+ self.assertEqual(self.ipmi.state, baremetal_states.DELETED)
+
+ def test_get_console_pid_path(self):
+ self.flags(terminal_pid_dir='/tmp', group='baremetal')
+ path = ipmi._get_console_pid_path(self.ipmi.node_id)
+ self.assertEqual(path, '/tmp/%s.pid' % self.ipmi.node_id)
+
+ def test_console_pid(self):
+ fd, path = tempfile.mkstemp()
+ with os.fdopen(fd, 'w') as f:
+ f.write("12345\n")
+
+ self.mox.StubOutWithMock(ipmi, '_get_console_pid_path')
+ ipmi._get_console_pid_path(self.ipmi.node_id).AndReturn(path)
+ self.mox.ReplayAll()
+
+ pid = ipmi._get_console_pid(self.ipmi.node_id)
+ bm_utils.unlink_without_raise(path)
+ self.mox.VerifyAll()
+ self.assertEqual(pid, 12345)
+
+ def test_console_pid_nan(self):
+ fd, path = tempfile.mkstemp()
+ with os.fdopen(fd, 'w') as f:
+ f.write("hello world\n")
+
+ self.mox.StubOutWithMock(ipmi, '_get_console_pid_path')
+ ipmi._get_console_pid_path(self.ipmi.node_id).AndReturn(path)
+ self.mox.ReplayAll()
+
+ pid = ipmi._get_console_pid(self.ipmi.node_id)
+ bm_utils.unlink_without_raise(path)
+ self.mox.VerifyAll()
+ self.assertTrue(pid is None)
+
+ def test_console_pid_file_not_found(self):
+ pid_path = ipmi._get_console_pid_path(self.ipmi.node_id)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ os.path.exists(pid_path).AndReturn(False)
+ self.mox.ReplayAll()
+
+ pid = ipmi._get_console_pid(self.ipmi.node_id)
+ self.mox.VerifyAll()
+ self.assertTrue(pid is None)
diff --git a/nova/tests/baremetal/test_volume_driver.py b/nova/tests/baremetal/test_volume_driver.py
index dacca6e53..e1b81d0b1 100644
--- a/nova/tests/baremetal/test_volume_driver.py
+++ b/nova/tests/baremetal/test_volume_driver.py
@@ -161,6 +161,6 @@ class BareMetalVolumeTestCase(test.TestCase):
self.assertTrue(tid is None)
def test_get_iqn(self):
- self.flags(baremetal_iscsi_iqn_prefix='iqn.2012-12.a.b')
+ self.flags(iscsi_iqn_prefix='iqn.2012-12.a.b', group='baremetal')
iqn = volume_driver._get_iqn('instname', '/dev/vdx')
self.assertEquals('iqn.2012-12.a.b:instname-dev-vdx', iqn)
diff --git a/nova/tests/cert/test_rpcapi.py b/nova/tests/cert/test_rpcapi.py
index 8db8a0f6c..f61d42408 100644
--- a/nova/tests/cert/test_rpcapi.py
+++ b/nova/tests/cert/test_rpcapi.py
@@ -33,8 +33,12 @@ class CertRpcAPITestCase(test.TestCase):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
expected_retval = 'foo'
+ expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
- expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
+ expected_msg['version'] = expected_version
+
+ if method == 'get_backdoor_port':
+ del expected_msg['args']['host']
self.call_ctxt = None
self.call_topic = None
@@ -84,3 +88,7 @@ class CertRpcAPITestCase(test.TestCase):
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
+
+ def test_get_backdoor_port(self):
+ self._test_cert_api('get_backdoor_port', host='fake_host',
+ version='1.1')
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 3983dc8bb..8263529b1 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -23,6 +23,7 @@ import copy
import datetime
import sys
import time
+import traceback
import uuid
import mox
@@ -5924,7 +5925,8 @@ class ComputeReschedulingTestCase(BaseTestCase):
self.updated_task_state = kwargs.get('task_state')
self.stubs.Set(self.compute, '_instance_update', fake_update)
- def _reschedule(self, request_spec=None, filter_properties=None):
+ def _reschedule(self, request_spec=None, filter_properties=None,
+ exc_info=None):
if not filter_properties:
filter_properties = {}
@@ -5940,7 +5942,7 @@ class ComputeReschedulingTestCase(BaseTestCase):
requested_networks, is_first_time, filter_properties)
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
- method_args, self.expected_task_state)
+ method_args, self.expected_task_state, exc_info=exc_info)
def test_reschedule_no_filter_properties(self):
"""no filter_properties will disable re-scheduling"""
@@ -5961,10 +5963,17 @@ class ComputeReschedulingTestCase(BaseTestCase):
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
request_spec = {'instance_uuids': ['foo', 'bar']}
+ try:
+ raise test.TestingException("just need an exception")
+ except test.TestingException:
+ exc_info = sys.exc_info()
+ exc_str = traceback.format_exception(*exc_info)
+
self.assertTrue(self._reschedule(filter_properties=filter_properties,
- request_spec=request_spec))
+ request_spec=request_spec, exc_info=exc_info))
self.assertEqual(1, len(request_spec['instance_uuids']))
self.assertEqual(self.updated_task_state, self.expected_task_state)
+ self.assertEqual(exc_str, filter_properties['retry']['exc'])
class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
@@ -5974,7 +5983,8 @@ class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
super(ComputeReschedulingResizeTestCase, self).setUp()
self.expected_task_state = task_states.RESIZE_PREP
- def _reschedule(self, request_spec=None, filter_properties=None):
+ def _reschedule(self, request_spec=None, filter_properties=None,
+ exc_info=None):
if not filter_properties:
filter_properties = {}
@@ -5991,7 +6001,7 @@ class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance_uuid, scheduler_method,
- method_args, self.expected_task_state)
+ method_args, self.expected_task_state, exc_info=exc_info)
class InnerTestingException(Exception):
@@ -6111,7 +6121,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.instance)
self.compute._reschedule(self.context, None, {}, instance_uuid,
self.compute.scheduler_rpcapi.run_instance,
- method_args, task_states.SCHEDULING).AndReturn(True)
+ method_args, task_states.SCHEDULING, exc_info).AndReturn(
+ True)
self.compute._log_original_error(exc_info, instance_uuid)
self.mox.ReplayAll()
@@ -6205,7 +6216,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.compute._reschedule(self.context, {}, {},
self.instance_uuid,
self.compute.scheduler_rpcapi.prep_resize, method_args,
- task_states.RESIZE_PREP).AndReturn(True)
+ task_states.RESIZE_PREP, exc_info).AndReturn(True)
self.compute._log_original_error(exc_info, self.instance_uuid)
self.mox.ReplayAll()
diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py
index 65db24844..4a1dc8fe6 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/console/test_console.py
@@ -185,3 +185,13 @@ class ConsoleAPITestCase(test.TestCase):
self.mox.ReplayAll()
self.console_api.create_console(self.context, self.fake_uuid)
+
+ def test_get_backdoor_port(self):
+ self.mox.StubOutWithMock(console_rpcapi.ConsoleAPI,
+ 'get_backdoor_port')
+
+ console_rpcapi.ConsoleAPI.get_backdoor_port(self.context, 'fake_host')
+
+ self.mox.ReplayAll()
+
+ self.console_api.get_backdoor_port(self.context, 'fake_host')
diff --git a/nova/tests/console/test_rpcapi.py b/nova/tests/console/test_rpcapi.py
index ef4bc4ae7..6e9417ada 100644
--- a/nova/tests/console/test_rpcapi.py
+++ b/nova/tests/console/test_rpcapi.py
@@ -29,31 +29,43 @@ CONF.import_opt('console_topic', 'nova.config')
class ConsoleRpcAPITestCase(test.TestCase):
- def _test_console_api(self, method, **kwargs):
+ def _test_console_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = console_rpcapi.ConsoleAPI()
+ expected_retval = 'foo' if method == 'call' else None
+ expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
- expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
+ expected_msg['version'] = expected_version
- self.cast_ctxt = None
- self.cast_topic = None
- self.cast_msg = None
+ if method == 'get_backdoor_port':
+ del expected_msg['args']['host']
- def _fake_cast(_ctxt, _topic, _msg):
- self.cast_ctxt = _ctxt
- self.cast_topic = _topic
- self.cast_msg = _msg
+ self.fake_args = None
+ self.fake_kwargs = None
- self.stubs.Set(rpc, 'cast', _fake_cast)
+ def _fake_rpc_method(*args, **kwargs):
+ self.fake_args = args
+ self.fake_kwargs = kwargs
+ if expected_retval:
+ return expected_retval
- getattr(rpcapi, method)(ctxt, **kwargs)
+ self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
- self.assertEqual(self.cast_ctxt, ctxt)
- self.assertEqual(self.cast_topic, CONF.console_topic)
- self.assertEqual(self.cast_msg, expected_msg)
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+
+ self.assertEqual(retval, expected_retval)
+ expected_args = [ctxt, CONF.console_topic, expected_msg]
+ for arg, expected_arg in zip(self.fake_args, expected_args):
+ self.assertEqual(arg, expected_arg)
def test_add_console(self):
- self._test_console_api('add_console', instance_id='i')
+ self._test_console_api('add_console', instance_id='i',
+ rpc_method='cast')
def test_remove_console(self):
- self._test_console_api('remove_console', console_id='i')
+ self._test_console_api('remove_console', console_id='i',
+ rpc_method='cast')
+
+ def test_get_backdoor_port(self):
+ self._test_console_api('get_backdoor_port', host='fake_host',
+ rpc_method='call', version='1.1')
diff --git a/nova/tests/consoleauth/test_consoleauth.py b/nova/tests/consoleauth/test_consoleauth.py
index 202e4c7b7..f92a4be1c 100644
--- a/nova/tests/consoleauth/test_consoleauth.py
+++ b/nova/tests/consoleauth/test_consoleauth.py
@@ -47,3 +47,8 @@ class ConsoleauthTestCase(test.TestCase):
self.assertTrue(self.manager.check_token(self.context, token))
timeutils.advance_time_seconds(1)
self.assertFalse(self.manager.check_token(self.context, token))
+
+ def test_get_backdoor_port(self):
+ self.manager.backdoor_port = 59697
+ port = self.manager.get_backdoor_port(self.context)
+ self.assertEqual(port, self.manager.backdoor_port)
diff --git a/nova/tests/consoleauth/test_rpcapi.py b/nova/tests/consoleauth/test_rpcapi.py
index 51a8eda14..c15ff7ff4 100644
--- a/nova/tests/consoleauth/test_rpcapi.py
+++ b/nova/tests/consoleauth/test_rpcapi.py
@@ -18,6 +18,7 @@
Unit Tests for nova.consoleauth.rpcapi
"""
+from nova.consoleauth import manager as consoleauth_manager
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.openstack.common import cfg
@@ -32,8 +33,12 @@ class ConsoleAuthRpcAPITestCase(test.TestCase):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
expected_retval = 'foo'
+ expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
- expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
+ expected_msg['version'] = expected_version
+
+ if method == 'get_backdoor_port':
+ del expected_msg['args']['host']
self.call_ctxt = None
self.call_topic = None
@@ -64,3 +69,7 @@ class ConsoleAuthRpcAPITestCase(test.TestCase):
def test_check_token(self):
self._test_consoleauth_api('check_token', token='t')
+
+ def test_get_backdoor_port(self):
+ self._test_consoleauth_api('get_backdoor_port', host='fake_host',
+ version='1.1')
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl b/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
new file mode 100644
index 000000000..85ae4890a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "reserve": "%(reserve)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
new file mode 100644
index 000000000..3896b24eb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<reserve>%(reserve)s</reserve>
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
new file mode 100644
index 000000000..a3d11475b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "fixed_ip": {
+ "cidr": "%(cidr)s",
+ "hostname": "%(hostname)s",
+ "host": "%(host)s",
+ "address": "%(address)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
new file mode 100644
index 000000000..3e9598f34
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<fixed_ip>
+ <cidr>%(cidr)s</cidr>
+ <hostname>%(hostname)s</hostname>
+ <host>%(host)s</host>
+ <address>%(address)s</address>
+</fixed_ip>
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index a8099a6bb..f8bd522fe 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -32,6 +32,7 @@ from nova.compute import api
from nova import context
from nova import db
from nova.db.sqlalchemy import models
+from nova import exception
from nova.network import api
from nova.network.manager import NetworkManager
from nova.openstack.common import cfg
@@ -81,6 +82,9 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
indent=4)
else:
+ if data is None:
+ # Likely from missing XML file.
+ return ""
xml = etree.XML(data)
data = etree.tostring(xml, encoding="UTF-8",
xml_declaration=True, pretty_print=True)
@@ -235,7 +239,10 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
with file(self._get_sample(name)) as sample:
sample_data = sample.read()
except IOError:
- sample_data = "{}"
+ if self.ctype == 'json':
+ sample_data = "{}"
+ else:
+ sample_data = None
try:
response_result = self._verify_something(subs, expected,
@@ -356,7 +363,6 @@ class ApiSamplesTrap(ApiSampleTestBase):
do_not_approve_additions.append('OS-FLV-DISABLED')
do_not_approve_additions.append('os-config-drive')
do_not_approve_additions.append('os-create-server-ext')
- do_not_approve_additions.append('os-fixed-ips')
do_not_approve_additions.append('os-flavor-access')
do_not_approve_additions.append('os-flavor-extra-specs')
do_not_approve_additions.append('os-flavor-rxtx')
@@ -1559,6 +1565,90 @@ class AgentsXmlTest(AgentsJsonTest):
ctype = "xml"
+class FixedIpJsonTest(ApiSampleTestBase):
+ extension_name = "nova.api.openstack.compute.contrib.fixed_ips.Fixed_ips"
+
+ def _get_flags(self):
+ f = super(FixedIpJsonTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ return f
+
+ def setUp(self):
+ super(FixedIpJsonTest, self).setUp()
+
+ fake_fixed_ips = [{'id': 1,
+ 'address': '192.168.1.1',
+ 'network_id': 1,
+ 'virtual_interface_id': 1,
+ 'instance_uuid': '1',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None},
+ {'id': 2,
+ 'address': '192.168.1.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 2,
+ 'instance_uuid': '2',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None},
+ ]
+
+ def fake_fixed_ip_get_by_address(context, address):
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return fixed_ip
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ def fake_fixed_ip_get_by_address_detailed(context, address):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ host = {'host': "host",
+ 'hostname': 'openstack'}
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return (fixed_ip, network, host)
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ def fake_fixed_ip_update(context, address, values):
+ fixed_ip = fake_fixed_ip_get_by_address(context, address)
+ if fixed_ip is None:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+ else:
+ for key in values:
+ fixed_ip[key] = values[key]
+
+ self.stubs.Set(db, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
+ fake_fixed_ip_get_by_address_detailed)
+ self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
+
+ def test_fixed_ip_reserve(self):
+ """Reserve a Fixed IP"""
+ project = {'reserve': None}
+ response = self._do_post('os-fixed-ips/192.168.1.1/action',
+ 'fixedip-post-req',
+ project)
+ self.assertEqual(response.status, 202)
+
+ def test_get_fixed_ip(self):
+ """Return data about the given fixed ip."""
+ response = self._do_get('os-fixed-ips/192.168.1.1')
+ self.assertEqual(response.status, 200)
+ project = {'cidr': '192.168.1.0/24',
+ 'hostname': 'openstack',
+ 'host': 'host',
+ 'address': '192.168.1.1'}
+ return self._verify_response('fixedips-get-resp', project, response)
+
+
+class FixedIpXmlTest(FixedIpJsonTest):
+ ctype = "xml"
+
+
class AggregatesSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".aggregates.Aggregates"
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 0e441eef8..c2aa1dbbb 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -740,15 +740,19 @@ class VlanNetworkTestCase(test.TestCase):
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
def fake_fixed_ip_get(context, fixed_ip_id):
- return {'instance_uuid': 'fake_uuid'}
+ return {'address': 'old', 'instance_uuid': 'fake_uuid'}
self.stubs.Set(self.network.db, 'fixed_ip_get', fake_fixed_ip_get)
+ # doesn't raise because we exit early if the address is the same
+ self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), 'old')
+
+ # raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
- mox.IgnoreArg())
+ 'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index b1174559a..da7652a50 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -37,6 +37,9 @@ class SchedulerRpcAPITestCase(test.TestCase):
expected_msg = rpcapi.make_msg(method, **kwargs)
expected_msg['version'] = expected_version
+ if method == 'get_backdoor_port':
+ del expected_msg['args']['host']
+
self.fake_args = None
self.fake_kwargs = None
@@ -84,3 +87,7 @@ class SchedulerRpcAPITestCase(test.TestCase):
rpc_method='fanout_cast', service_name='fake_name',
host='fake_host', capabilities='fake_capabilities',
version='2.4')
+
+ def test_get_backdoor_port(self):
+ self._test_scheduler_api('get_backdoor_port', rpc_method='call',
+ host='fake_host', version='2.5')
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index 11d29a3ff..dfdd4f3d7 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -42,6 +42,11 @@ def _stub_volume(**kwargs):
volume.update(kwargs)
return volume
+_image_metadata = {
+ 'kernel_id': 'fake',
+ 'ramdisk_id': 'fake'
+}
+
class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
@@ -82,6 +87,13 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
def get_volumes_nonexisting(self, **kw):
raise cinder_exception.NotFound(code=404, message='Resource not found')
+ def get_volumes_5678(self, **kw):
+ """Volume with image metadata"""
+ volume = {'volume': _stub_volume(id='1234',
+ volume_image_metadata=_image_metadata)
+ }
+ return (200, volume)
+
class FakeCinderClient(cinder.cinder_client.Client):
@@ -155,3 +167,9 @@ class CinderTestCase(test.TestCase):
def test_get_non_existing_volume(self):
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
+
+ def test_volume_with_image_metadata(self):
+ volume = self.api.get(self.context, '5678')
+ self.assert_called('GET', '/volumes/5678')
+ self.assertTrue('volume_image_metadata' in volume)
+ self.assertEqual(volume['volume_image_metadata'], _image_metadata)
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 29bce8bf5..ea6e9aea5 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -284,6 +284,27 @@ class DbApiTestCase(test.TestCase):
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, ctxt, values2)
+ def test_instance_test_and_set(self):
+ ctxt = context.get_admin_context()
+ states = [
+ (None, [None, 'some'], 'building'),
+ (None, [None], 'building'),
+ ('building', ['building'], 'ready'),
+ ('building', [None, 'building'], 'ready')]
+ for st in states:
+ inst = db.instance_create(ctxt, {'vm_state': st[0]})
+ uuid = inst['uuid']
+ db.instance_test_and_set(ctxt, uuid, 'vm_state', st[1], st[2])
+ inst = db.instance_get_by_uuid(ctxt, uuid)
+ self.assertEqual(inst["vm_state"], st[2])
+
+ def test_instance_test_and_set_exception(self):
+ ctxt = context.get_admin_context()
+ inst = db.instance_create(ctxt, {'vm_state': 'building'})
+ self.assertRaises(exception.InstanceInvalidState,
+ db.instance_test_and_set, ctxt,
+ inst['uuid'], 'vm_state', [None, 'disable'], 'run')
+
def test_instance_update_with_instance_uuid(self):
""" test instance_update() works when an instance UUID is passed """
ctxt = context.get_admin_context()
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index f0a047b1f..125b2fe36 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -61,7 +61,7 @@ def _is_mysql_avail(user="openstack_citest",
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
- # intential catch all to handle exceptions even if we don't
+ # intentionally catch all to handle exceptions even if we don't
# have mysql code loaded at all.
return False
else:
@@ -217,7 +217,7 @@ class TestMigrations(test.TestCase):
if not _have_mysql():
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
- # automaticaly in tearDown so no need to clean it up here.
+ # automatically in tearDown so no need to clean it up here.
connect_string = _mysql_get_connect_string()
engine = sqlalchemy.create_engine(connect_string)
self.engines["mysqlcitest"] = engine
diff --git a/nova/tests/virt/disk/test_api.py b/nova/tests/virt/disk/test_api.py
new file mode 100644
index 000000000..95368a2c2
--- /dev/null
+++ b/nova/tests/virt/disk/test_api.py
@@ -0,0 +1,63 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os
+import tempfile
+
+import fixtures
+
+from nova.openstack.common import importutils
+from nova import test
+from nova.virt.disk import api
+
+
+class APITestCase(test.TestCase):
+
+ def test_can_resize_need_fs_type_specified(self):
+ # NOTE(mikal): Bug 1094373 saw a regression where we failed to
+ # treat a failure to mount as a failure to be able to resize the
+ # filesystem
+ def _fake_get_disk_size(path):
+ return 10
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.api.get_disk_size', _fake_get_disk_size))
+
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ def fake_returns_true(*args, **kwargs):
+ return True
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.mount.nbd.NbdMount.get_dev',
+ fake_returns_true))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.disk.mount.nbd.NbdMount.map_dev',
+ fake_returns_true))
+
+ # Force the use of localfs, which is what was used during the failure
+ # reported in the bug
+ def fake_import_fails(*args, **kwargs):
+ raise Exception('Failed')
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.openstack.common.importutils.import_module',
+ fake_import_fails))
+
+ imgfile = tempfile.NamedTemporaryFile()
+ self.addCleanup(imgfile.close)
+ self.assertFalse(api.can_resize_fs(imgfile, 100, use_cow=True))
diff --git a/nova/tests/virt/disk/test_nbd.py b/nova/tests/virt/disk/test_nbd.py
index 16003c9ac..59b0784d9 100644
--- a/nova/tests/virt/disk/test_nbd.py
+++ b/nova/tests/virt/disk/test_nbd.py
@@ -16,11 +16,12 @@
# under the License.
-import fixtures
import os
+import tempfile
-from nova import test
+import fixtures
+from nova import test
from nova.virt.disk.mount import nbd
ORIG_EXISTS = os.path.exists
@@ -270,3 +271,22 @@ class NbdTestCase(test.TestCase):
# No error logged, device consumed
self.assertFalse(n.get_dev())
+
+ def test_do_mount_need_to_specify_fs_type(self):
+ # NOTE(mikal): Bug 1094373 saw a regression where we failed to
+ # communicate a failed mount properly.
+ def fake_trycmd(*args, **kwargs):
+ return '', 'broken'
+ self.useFixture(fixtures.MonkeyPatch('nova.utils.trycmd', fake_trycmd))
+
+ imgfile = tempfile.NamedTemporaryFile()
+ self.addCleanup(imgfile.close)
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ mount = nbd.NbdMount(imgfile.name, tempdir)
+
+ def fake_returns_true(*args, **kwargs):
+ return True
+ mount.get_dev = fake_returns_true
+ mount.map_dev = fake_returns_true
+
+ self.assertFalse(mount.do_mount())
diff --git a/nova/virt/baremetal/base.py b/nova/virt/baremetal/base.py
index 4b0640885..97d4f8ca3 100644
--- a/nova/virt/baremetal/base.py
+++ b/nova/virt/baremetal/base.py
@@ -51,7 +51,7 @@ class NodeDriver(object):
class PowerManager(object):
- def __init__(self, node):
+ def __init__(self, **kwargs):
pass
def activate_node(self):
diff --git a/nova/virt/baremetal/db/api.py b/nova/virt/baremetal/db/api.py
index 0b8cf781c..3ff533c6c 100644
--- a/nova/virt/baremetal/db/api.py
+++ b/nova/virt/baremetal/db/api.py
@@ -37,8 +37,8 @@ these objects be simple dictionaries.
:baremetal_db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
-:baremetal_sql_connection: string specifying the sqlalchemy connection to use,
- like: `sqlite:///var/lib/nova/nova.sqlite`.
+:[BAREMETAL] sql_connection: string specifying the sqlalchemy connection to
+ use, like: `sqlite:///var/lib/nova/nova.sqlite`.
"""
@@ -46,6 +46,9 @@ from nova.openstack.common import cfg
from nova import utils
+# NOTE(deva): we can't move baremetal_db_backend into an OptGroup yet
+# because utils.LazyPluggable doesn't support reading from
+# option groups. See bug #1093043.
db_opts = [
cfg.StrOpt('baremetal_db_backend',
default='sqlalchemy',
diff --git a/nova/virt/baremetal/db/sqlalchemy/session.py b/nova/virt/baremetal/db/sqlalchemy/session.py
index 3e562e32a..14013669d 100644
--- a/nova/virt/baremetal/db/sqlalchemy/session.py
+++ b/nova/virt/baremetal/db/sqlalchemy/session.py
@@ -23,14 +23,19 @@ from nova.db.sqlalchemy import session as nova_session
from nova.openstack.common import cfg
opts = [
- cfg.StrOpt('baremetal_sql_connection',
+ cfg.StrOpt('sql_connection',
default='sqlite:///$state_path/baremetal_$sqlite_db',
help='The SQLAlchemy connection string used to connect to the '
'bare-metal database'),
]
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
CONF = cfg.CONF
-CONF.register_opts(opts)
+CONF.register_group(baremetal_group)
+CONF.register_opts(opts, baremetal_group)
+
CONF.import_opt('sqlite_db', 'nova.db.sqlalchemy.session')
CONF.import_opt('state_path', 'nova.config')
@@ -55,5 +60,5 @@ def get_engine():
"""Return a SQLAlchemy engine."""
global _ENGINE
if _ENGINE is None:
- _ENGINE = nova_session.create_engine(CONF.baremetal_sql_connection)
+ _ENGINE = nova_session.create_engine(CONF.baremetal.sql_connection)
return _ENGINE
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index e840c4e75..217f36fed 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -28,22 +28,22 @@ from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.virt.baremetal import baremetal_states
-from nova.virt.baremetal import db as bmdb
+from nova.virt.baremetal import db
from nova.virt import driver
from nova.virt import firewall
from nova.virt.libvirt import imagecache
opts = [
- cfg.BoolOpt('baremetal_inject_password',
+ cfg.BoolOpt('inject_password',
default=True,
help='Whether baremetal compute injects password or not'),
- cfg.StrOpt('baremetal_injected_network_template',
+ cfg.StrOpt('injected_network_template',
default='$pybasedir/nova/virt/baremetal/interfaces.template',
help='Template file for injected network'),
- cfg.StrOpt('baremetal_vif_driver',
+ cfg.StrOpt('vif_driver',
default='nova.virt.baremetal.vif_driver.BareMetalVIFDriver',
help='Baremetal VIF driver.'),
- cfg.StrOpt('baremetal_volume_driver',
+ cfg.StrOpt('volume_driver',
default='nova.virt.baremetal.volume_driver.LibvirtVolumeDriver',
help='Baremetal volume driver.'),
cfg.ListOpt('instance_type_extra_specs',
@@ -52,13 +52,13 @@ opts = [
'instance_type_extra_specs for this compute '
'host to advertise. Valid entries are name=value, pairs '
'For example, "key1:val1, key2:val2"'),
- cfg.StrOpt('baremetal_driver',
+ cfg.StrOpt('driver',
default='nova.virt.baremetal.pxe.PXE',
help='Baremetal driver back-end (pxe or tilera)'),
cfg.StrOpt('power_manager',
default='nova.virt.baremetal.ipmi.IPMI',
help='Baremetal power management method'),
- cfg.StrOpt('baremetal_tftp_root',
+ cfg.StrOpt('tftp_root',
default='/tftpboot',
help='Baremetal compute node\'s tftp root path'),
]
@@ -66,8 +66,12 @@ opts = [
LOG = logging.getLogger(__name__)
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
CONF = cfg.CONF
-CONF.register_opts(opts)
+CONF.register_group(baremetal_group)
+CONF.register_opts(opts, baremetal_group)
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
@@ -75,13 +79,13 @@ DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
def _get_baremetal_nodes(context):
- nodes = bmdb.bm_node_get_all(context, service_host=CONF.host)
+ nodes = db.bm_node_get_all(context, service_host=CONF.host)
return nodes
def _get_baremetal_node_by_instance_uuid(instance_uuid):
ctx = nova_context.get_admin_context()
- node = bmdb.bm_node_get_by_instance_uuid(ctx, instance_uuid)
+ node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid)
if node['service_host'] != CONF.host:
LOG.error(_("Request for baremetal node %s "
"sent to wrong service host") % instance_uuid)
@@ -93,15 +97,15 @@ def _update_baremetal_state(context, node, instance, state):
instance_uuid = None
if instance:
instance_uuid = instance['uuid']
- bmdb.bm_node_update(context, node['id'],
+ db.bm_node_update(context, node['id'],
{'instance_uuid': instance_uuid,
'task_state': state,
})
-def get_power_manager(node, **kwargs):
- cls = importutils.import_class(CONF.power_manager)
- return cls(node, **kwargs)
+def get_power_manager(**kwargs):
+ cls = importutils.import_class(CONF.baremetal.power_manager)
+ return cls(**kwargs)
class BareMetalDriver(driver.ComputeDriver):
@@ -114,19 +118,19 @@ class BareMetalDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False):
super(BareMetalDriver, self).__init__(virtapi)
- self.baremetal_nodes = importutils.import_object(
- CONF.baremetal_driver)
- self._vif_driver = importutils.import_object(
- CONF.baremetal_vif_driver)
- self._firewall_driver = firewall.load_driver(
+ self.driver = importutils.import_object(
+ CONF.baremetal.driver)
+ self.vif_driver = importutils.import_object(
+ CONF.baremetal.vif_driver)
+ self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER)
- self._volume_driver = importutils.import_object(
- CONF.baremetal_volume_driver, virtapi)
- self._image_cache_manager = imagecache.ImageCacheManager()
+ self.volume_driver = importutils.import_object(
+ CONF.baremetal.volume_driver, virtapi)
+ self.image_cache_manager = imagecache.ImageCacheManager()
extra_specs = {}
- extra_specs["baremetal_driver"] = CONF.baremetal_driver
- for pair in CONF.instance_type_extra_specs:
+ extra_specs["baremetal_driver"] = CONF.baremetal.driver
+ for pair in CONF.baremetal.instance_type_extra_specs:
keyval = pair.split(':', 1)
keyval[0] = keyval[0].strip()
keyval[1] = keyval[1].strip()
@@ -135,9 +139,9 @@ class BareMetalDriver(driver.ComputeDriver):
LOG.warning(
_('cpu_arch is not found in instance_type_extra_specs'))
extra_specs['cpu_arch'] = ''
- self._extra_specs = extra_specs
+ self.extra_specs = extra_specs
- self._supported_instances = [
+ self.supported_instances = [
(extra_specs['cpu_arch'], 'baremetal', 'baremetal'),
]
@@ -174,7 +178,7 @@ class BareMetalDriver(driver.ComputeDriver):
if not nodename:
raise exception.NovaException(_("Baremetal node id not supplied"
" to driver"))
- node = bmdb.bm_node_get(context, nodename)
+ node = db.bm_node_get(context, nodename)
if node['instance_uuid']:
raise exception.NovaException(_("Baremetal node %s already"
" in use") % nodename)
@@ -184,28 +188,28 @@ class BareMetalDriver(driver.ComputeDriver):
_update_baremetal_state(context, node, instance,
baremetal_states.BUILDING)
- var = self.baremetal_nodes.define_vars(instance, network_info,
+ var = self.driver.define_vars(instance, network_info,
block_device_info)
self._plug_vifs(instance, network_info, context=context)
- self._firewall_driver.setup_basic_filtering(instance, network_info)
- self._firewall_driver.prepare_instance_filter(instance,
+ self.firewall_driver.setup_basic_filtering(instance, network_info)
+ self.firewall_driver.prepare_instance_filter(instance,
network_info)
- self.baremetal_nodes.create_image(var, context, image_meta, node,
+ self.driver.create_image(var, context, image_meta, node,
instance,
injected_files=injected_files,
admin_password=admin_password)
- self.baremetal_nodes.activate_bootloader(var, context, node,
+ self.driver.activate_bootloader(var, context, node,
instance, image_meta)
- pm = get_power_manager(node)
+ pm = get_power_manager(node=node, instance=instance)
state = pm.activate_node()
_update_baremetal_state(context, node, instance, state)
- self.baremetal_nodes.activate_node(var, context, node, instance)
- self._firewall_driver.apply_instance_filter(instance, network_info)
+ self.driver.activate_node(var, context, node, instance)
+ self.firewall_driver.apply_instance_filter(instance, network_info)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
@@ -226,7 +230,7 @@ class BareMetalDriver(driver.ComputeDriver):
block_device_info=None):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
ctx = nova_context.get_admin_context()
- pm = get_power_manager(node)
+ pm = get_power_manager(node=node, instance=instance)
state = pm.reboot_node()
_update_baremetal_state(ctx, node, instance, state)
@@ -242,12 +246,12 @@ class BareMetalDriver(driver.ComputeDriver):
% instance['uuid'])
return
- var = self.baremetal_nodes.define_vars(instance, network_info,
+ var = self.driver.define_vars(instance, network_info,
block_device_info)
- self.baremetal_nodes.deactivate_node(var, ctx, node, instance)
+ self.driver.deactivate_node(var, ctx, node, instance)
- pm = get_power_manager(node)
+ pm = get_power_manager(node=node, instance=instance)
pm.stop_console()
@@ -263,12 +267,12 @@ class BareMetalDriver(driver.ComputeDriver):
mountpoint = vol['mount_device']
self.detach_volume(connection_info, instance['name'], mountpoint)
- self.baremetal_nodes.deactivate_bootloader(var, ctx, node, instance)
+ self.driver.deactivate_bootloader(var, ctx, node, instance)
- self.baremetal_nodes.destroy_images(var, ctx, node, instance)
+ self.driver.destroy_images(var, ctx, node, instance)
# stop firewall
- self._firewall_driver.unfilter_instance(instance,
+ self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
self._unplug_vifs(instance, network_info)
@@ -278,25 +282,25 @@ class BareMetalDriver(driver.ComputeDriver):
def power_off(self, instance):
"""Power off the specified instance."""
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
- pm = get_power_manager(node)
+ pm = get_power_manager(node=node, instance=instance)
pm.deactivate_node()
def power_on(self, instance):
"""Power on the specified instance"""
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
- pm = get_power_manager(node)
+ pm = get_power_manager(node=node, instance=instance)
pm.activate_node()
def get_volume_connector(self, instance):
- return self._volume_driver.get_volume_connector(instance)
+ return self.volume_driver.get_volume_connector(instance)
def attach_volume(self, connection_info, instance_name, mountpoint):
- return self._volume_driver.attach_volume(connection_info,
+ return self.volume_driver.attach_volume(connection_info,
instance_name, mountpoint)
@exception.wrap_exception()
def detach_volume(self, connection_info, instance_name, mountpoint):
- return self._volume_driver.detach_volume(connection_info,
+ return self.volume_driver.detach_volume(connection_info,
instance_name, mountpoint)
def get_info(self, instance):
@@ -304,7 +308,7 @@ class BareMetalDriver(driver.ComputeDriver):
# so we convert from InstanceNotFound
inst_uuid = instance.get('uuid')
node = _get_baremetal_node_by_instance_uuid(inst_uuid)
- pm = get_power_manager(node)
+ pm = get_power_manager(node=node, instance=instance)
ps = power_state.SHUTDOWN
if pm.is_power_on():
ps = power_state.RUNNING
@@ -315,15 +319,15 @@ class BareMetalDriver(driver.ComputeDriver):
'cpu_time': 0}
def refresh_security_group_rules(self, security_group_id):
- self._firewall_driver.refresh_security_group_rules(security_group_id)
+ self.firewall_driver.refresh_security_group_rules(security_group_id)
return True
def refresh_security_group_members(self, security_group_id):
- self._firewall_driver.refresh_security_group_members(security_group_id)
+ self.firewall_driver.refresh_security_group_members(security_group_id)
return True
def refresh_provider_fw_rules(self):
- self._firewall_driver.refresh_provider_fw_rules()
+ self.firewall_driver.refresh_provider_fw_rules()
def _node_resource(self, node):
vcpus_used = 0
@@ -352,27 +356,27 @@ class BareMetalDriver(driver.ComputeDriver):
return dic
def refresh_instance_security_rules(self, instance):
- self._firewall_driver.refresh_instance_security_rules(instance)
+ self.firewall_driver.refresh_instance_security_rules(instance)
def get_available_resource(self, nodename):
context = nova_context.get_admin_context()
- node = bmdb.bm_node_get(context, nodename)
+ node = db.bm_node_get(context, nodename)
dic = self._node_resource(node)
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
- self._firewall_driver.setup_basic_filtering(instance_ref, network_info)
- self._firewall_driver.prepare_instance_filter(instance_ref,
+ self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
+ self.firewall_driver.prepare_instance_filter(instance_ref,
network_info)
def unfilter_instance(self, instance_ref, network_info):
- self._firewall_driver.unfilter_instance(instance_ref,
+ self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def get_host_stats(self, refresh=False):
caps = []
context = nova_context.get_admin_context()
- nodes = bmdb.bm_node_get_all(context,
+ nodes = db.bm_node_get_all(context,
service_host=CONF.host)
for node in nodes:
res = self._node_resource(node)
@@ -389,8 +393,8 @@ class BareMetalDriver(driver.ComputeDriver):
data['hypervisor_type'] = res['hypervisor_type']
data['hypervisor_version'] = res['hypervisor_version']
data['hypervisor_hostname'] = nodename
- data['supported_instances'] = self._supported_instances
- data.update(self._extra_specs)
+ data['supported_instances'] = self.supported_instances
+ data.update(self.extra_specs)
data['host'] = CONF.host
data['node'] = nodename
# TODO(NTTdocomo): put node's extra specs here
@@ -406,24 +410,24 @@ class BareMetalDriver(driver.ComputeDriver):
context = nova_context.get_admin_context()
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
if node:
- pifs = bmdb.bm_interface_get_all_by_bm_node_id(context, node['id'])
+ pifs = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
for pif in pifs:
if pif['vif_uuid']:
- bmdb.bm_interface_set_vif_uuid(context, pif['id'], None)
+ db.bm_interface_set_vif_uuid(context, pif['id'], None)
for (network, mapping) in network_info:
- self._vif_driver.plug(instance, (network, mapping))
+ self.vif_driver.plug(instance, (network, mapping))
def _unplug_vifs(self, instance, network_info):
for (network, mapping) in network_info:
- self._vif_driver.unplug(instance, (network, mapping))
+ self.vif_driver.unplug(instance, (network, mapping))
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
- self._image_cache_manager.verify_base_images(context, all_instances)
+ self.image_cache_manager.verify_base_images(context, all_instances)
def get_console_output(self, instance):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
- return self.baremetal_nodes.get_console_output(node, instance)
+ return self.driver.get_console_output(node, instance)
def get_available_nodes(self):
context = nova_context.get_admin_context()
diff --git a/nova/virt/baremetal/fake.py b/nova/virt/baremetal/fake.py
index 9df964c39..920eb0c10 100644
--- a/nova/virt/baremetal/fake.py
+++ b/nova/virt/baremetal/fake.py
@@ -18,13 +18,14 @@
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
+from nova.virt.firewall import NoopFirewallDriver
def get_baremetal_nodes():
- return Fake()
+ return FakeDriver()
-class Fake(base.NodeDriver):
+class FakeDriver(base.NodeDriver):
def define_vars(self, instance, network_info, block_device_info):
return {}
@@ -73,3 +74,35 @@ class FakePowerManager(base.PowerManager):
def stop_console(self):
pass
+
+
+class FakeFirewallDriver(NoopFirewallDriver):
+
+ def __init__(self):
+ super(FakeFirewallDriver, self).__init__()
+
+
+class FakeVifDriver(object):
+
+ def __init__(self):
+ super(FakeVifDriver, self).__init__()
+
+ def plug(self, instance, vif):
+ pass
+
+ def unplug(self, instance, vif):
+ pass
+
+
+class FakeVolumeDriver(object):
+
+ def __init__(self, virtapi):
+ super(FakeVolumeDriver, self).__init__()
+ self.virtapi = virtapi
+ self._initiator = "fake_initiator"
+
+ def attach_volume(self, connection_info, instance_name, mountpoint):
+ pass
+
+ def detach_volume(self, connection_info, instance_name, mountpoint):
+ pass
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
new file mode 100644
index 000000000..cc1704c7c
--- /dev/null
+++ b/nova/virt/baremetal/ipmi.py
@@ -0,0 +1,257 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Baremetal IPMI power manager.
+"""
+
+import os
+import stat
+import tempfile
+import time
+
+from nova.exception import InvalidParameterValue
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.baremetal import baremetal_states
+from nova.virt.baremetal import base
+from nova.virt.baremetal import utils as bm_utils
+
+opts = [
+ cfg.StrOpt('terminal',
+ default='shellinaboxd',
+ help='path to baremetal terminal program'),
+ cfg.StrOpt('terminal_cert_dir',
+ default=None,
+ help='path to baremetal terminal SSL cert(PEM)'),
+ cfg.StrOpt('terminal_pid_dir',
+ default='$state_path/baremetal/console',
+ help='path to directory stores pidfiles of baremetal_terminal'),
+ cfg.IntOpt('ipmi_power_retry',
+ default=5,
+ help='maximal number of retries for IPMI operations'),
+ ]
+
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
+CONF = cfg.CONF
+CONF.register_group(baremetal_group)
+CONF.register_opts(opts, baremetal_group)
+
+LOG = logging.getLogger(__name__)
+
+
+def _make_password_file(password):
+ fd, path = tempfile.mkstemp()
+ os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
+ with os.fdopen(fd, "w") as f:
+ f.write(password)
+ return path
+
+
+def _get_console_pid_path(node_id):
+ name = "%s.pid" % node_id
+ path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
+ return path
+
+
+def _get_console_pid(node_id):
+ pid_path = _get_console_pid_path(node_id)
+ if os.path.exists(pid_path):
+ with open(pid_path, 'r') as f:
+ pid_str = f.read()
+ try:
+ return int(pid_str)
+ except ValueError:
+ LOG.warn(_("pid file %s does not contain any pid"), pid_path)
+ return None
+
+
+class IPMI(base.PowerManager):
+ """IPMI Power Driver for Baremetal Nova Compute
+
+ This PowerManager class provides mechanism for controlling the power state
+ of physical hardware via IPMI calls. It also provides serial console access
+ where available.
+
+ """
+
+ def __init__(self, node, **kwargs):
+ self.state = None
+ self.retries = None
+ self.node_id = node['id']
+ self.address = node['pm_address']
+ self.user = node['pm_user']
+ self.password = node['pm_password']
+ self.port = node['terminal_port']
+
+ if self.node_id is None:
+ raise InvalidParameterValue(_("Node id not supplied to IPMI"))
+ if self.address is None:
+ raise InvalidParameterValue(_("Address not supplied to IPMI"))
+ if self.user is None:
+ raise InvalidParameterValue(_("User not supplied to IPMI"))
+ if self.password is None:
+ raise InvalidParameterValue(_("Password not supplied to IPMI"))
+
+ def _exec_ipmitool(self, command):
+ args = ['ipmitool',
+ '-I',
+ 'lanplus',
+ '-H',
+ self.address,
+ '-U',
+ self.user,
+ '-f']
+ pwfile = _make_password_file(self.password)
+ try:
+ args.append(pwfile)
+ args.extend(command.split(" "))
+ out, err = utils.execute(*args, attempts=3)
+ LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"),
+ locals())
+ return out, err
+ finally:
+ bm_utils.unlink_without_raise(pwfile)
+
+ def _is_power(self, state):
+ out_err = self._exec_ipmitool("power status")
+ return out_err[0] == ("Chassis Power is %s\n" % state)
+
+ def _power_on(self):
+ """Turn the power to this node ON"""
+
+ def _wait_for_power_on():
+ """Called at an interval until the node's power is on"""
+
+ if self._is_power("on"):
+ self.state = baremetal_states.ACTIVE
+ raise utils.LoopingCallDone()
+ if self.retries > CONF.baremetal.ipmi_power_retry:
+ self.state = baremetal_states.ERROR
+ raise utils.LoopingCallDone()
+ try:
+ self.retries += 1
+ self._exec_ipmitool("power on")
+ except Exception:
+ LOG.exception(_("IPMI power on failed"))
+
+ self.retries = 0
+ timer = utils.LoopingCall(_wait_for_power_on)
+ timer.start(interval=0.5).wait()
+
+ def _power_off(self):
+ """Turn the power to this node OFF"""
+
+ def _wait_for_power_off():
+ """Called at an interval until the node's power is off"""
+
+ if self._is_power("off"):
+ self.state = baremetal_states.DELETED
+ raise utils.LoopingCallDone()
+ if self.retries > CONF.baremetal.ipmi_power_retry:
+ self.state = baremetal_states.ERROR
+ raise utils.LoopingCallDone()
+ try:
+ self.retries += 1
+ self._exec_ipmitool("power off")
+ except Exception:
+ LOG.exception(_("IPMI power off failed"))
+
+ self.retries = 0
+ timer = utils.LoopingCall(_wait_for_power_off)
+ timer.start(interval=0.5).wait()
+
+ def _set_pxe_for_next_boot(self):
+ try:
+ self._exec_ipmitool("chassis bootdev pxe")
+ except Exception:
+ LOG.exception(_("IPMI set next bootdev failed"))
+
+ def activate_node(self):
+ """Turns the power to node ON"""
+ if self._is_power("on") and self.state == baremetal_states.ACTIVE:
+ LOG.warning(_("Activate node called, but node %s "
+ "is already active") % self.address)
+ self._set_pxe_for_next_boot()
+ self._power_on()
+ return self.state
+
+ def reboot_node(self):
+ """Cycles the power to a node"""
+ self._power_off()
+ self._set_pxe_for_next_boot()
+ self._power_on()
+ return self.state
+
+ def deactivate_node(self):
+ """Turns the power to node OFF, regardless of current state"""
+ self._power_off()
+ return self.state
+
+ def is_power_on(self):
+ return self._is_power("on")
+
+ def start_console(self):
+ if not self.port:
+ return
+ args = []
+ args.append(CONF.baremetal.terminal)
+ if CONF.baremetal.terminal_cert_dir:
+ args.append("-c")
+ args.append(CONF.baremetal.terminal_cert_dir)
+ else:
+ args.append("-t")
+ args.append("-p")
+ args.append(str(self.port))
+ args.append("--background=%s" % _get_console_pid_path(self.node_id))
+ args.append("-s")
+
+ pwfile = _make_password_file(self.password)
+ try:
+ ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
+ " -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
+ % {'uid': os.getuid(),
+ 'gid': os.getgid(),
+ 'address': self.address,
+ 'user': self.user,
+ 'pwfile': pwfile,
+ }
+
+ args.append(ipmi_args)
+ # Run shellinaboxd without pipes. Otherwise utils.execute() waits
+ # infinitely since shellinaboxd does not close passed fds.
+ x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args]
+ x.append('</dev/null')
+ x.append('>/dev/null')
+ x.append('2>&1')
+ utils.execute(' '.join(x), shell=True)
+ finally:
+ bm_utils.unlink_without_raise(pwfile)
+
+ def stop_console(self):
+ console_pid = _get_console_pid(self.node_id)
+ if console_pid:
+ # Allow exitcode 99 (RC_UNAUTHORIZED)
+ utils.execute('kill', '-TERM', str(console_pid),
+ run_as_root=True,
+ check_exit_code=[0, 99])
+ bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id))
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index bf8e47a63..6708b33ab 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -29,18 +29,22 @@ from nova.virt import driver
from nova.virt.libvirt import utils as libvirt_utils
opts = [
- cfg.BoolOpt('baremetal_use_unsafe_iscsi',
+ cfg.BoolOpt('use_unsafe_iscsi',
default=False,
help='Do not set this out of dev/test environments. '
'If a node does not have an fixed PXE IP address, '
'volumes are exported with globally opened ACL'),
- cfg.StrOpt('baremetal_iscsi_iqn_prefix',
+ cfg.StrOpt('iscsi_iqn_prefix',
default='iqn.2010-10.org.openstack.baremetal',
help='iSCSI IQN prefix used in baremetal volume connections.'),
]
+baremetal_group = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
CONF = cfg.CONF
-CONF.register_opts(opts)
+CONF.register_group(baremetal_group)
+CONF.register_opts(opts, baremetal_group)
CONF.import_opt('libvirt_volume_drivers', 'nova.virt.libvirt.driver')
@@ -172,7 +176,7 @@ def _find_tid(iqn):
def _get_iqn(instance_name, mountpoint):
mp = mountpoint.replace('/', '-').strip('-')
- iqn = '%s:%s-%s' % (CONF.baremetal_iscsi_iqn_prefix,
+ iqn = '%s:%s-%s' % (CONF.baremetal.iscsi_iqn_prefix,
instance_name,
mp)
return iqn
@@ -230,7 +234,7 @@ class LibvirtVolumeDriver(VolumeDriver):
ctx = nova_context.get_admin_context()
pxe_ip = bmdb.bm_pxe_ip_get_by_bm_node_id(ctx, node['id'])
if not pxe_ip:
- if not CONF.baremetal_use_unsafe_iscsi:
+ if not CONF.baremetal.use_unsafe_iscsi:
raise exception.NovaException(_(
'No fixed PXE IP is associated to %s') % instance_name)
@@ -250,7 +254,7 @@ class LibvirtVolumeDriver(VolumeDriver):
# instance's initiator ip, it allows any initiators
# to connect to the volume. This means other bare-metal
# instances that are not attached the volume can connect
- # to the volume. Do not set CONF.baremetal_use_unsafe_iscsi
+ # to the volume. Do not set CONF.baremetal.use_unsafe_iscsi
# out of dev/test environments.
# TODO(NTTdocomo): support CHAP
_allow_iscsi_tgtadm(tid, 'ALL')
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 758299f16..f663515cd 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -127,20 +127,32 @@ def can_resize_fs(image, size, use_cow=False):
# Check that we're increasing the size
virt_size = get_disk_size(image)
if virt_size >= size:
+ LOG.debug(_('Cannot resize filesystem %s to a smaller size.'),
+ image)
return False
# Check the image is unpartitioned
if use_cow:
# Try to mount an unpartitioned qcow2 image
+ LOG.debug(_('Checking if we can resize the COW image %s.'), image)
try:
inject_data(image, use_cow=True)
- except exception.NovaException:
+ except exception.NovaException, e:
+ LOG.debug(_('File injection failed for image %(image)s with '
+ 'error %(error)s. Cannot resize.'),
+ {'image': image,
+ 'error': e})
return False
else:
# For raw, we can directly inspect the file system
+ LOG.debug(_('Checking if we can resize the non-COW image %s.'), image)
try:
utils.execute('e2label', image)
- except exception.ProcessExecutionError:
+ except exception.ProcessExecutionError, e:
+ LOG.debug(_('Unable to determine label for image %(image)s with '
+ 'error %(error)s. Cannot resize.'),
+ {'image': image,
+ 'error': e})
return False
return True
@@ -252,8 +264,7 @@ class _DiskImage(object):
# Public module functions
-def inject_data(image,
- key=None, net=None, metadata=None, admin_password=None,
+def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
files=None, partition=None, use_cow=False):
"""Injects a ssh key and optionally net data into a disk image.
diff --git a/nova/virt/disk/vfs/localfs.py b/nova/virt/disk/vfs/localfs.py
index 3686994fa..9efa6798b 100644
--- a/nova/virt/disk/vfs/localfs.py
+++ b/nova/virt/disk/vfs/localfs.py
@@ -74,8 +74,7 @@ class VFSLocalFS(vfs.VFS):
self.imgdir,
self.partition)
if not mount.do_mount():
- raise Exception(_("Failed to mount image: %s") %
- mount.error)
+ raise exception.NovaException(mount.error)
self.mount = mount
except Exception, e:
LOG.debug(_("Failed to mount image %(ex)s)") %
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 1b8305505..04c151d1e 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -20,6 +20,7 @@
Handles all requests relating to volumes + cinder.
"""
+from copy import deepcopy
import sys
from cinderclient import exceptions as cinder_exception
@@ -117,6 +118,9 @@ def _untranslate_volume_summary_view(context, vol):
item['value'] = value
d['volume_metadata'].append(item)
+ if hasattr(vol, 'volume_image_metadata'):
+ d['volume_image_metadata'] = deepcopy(vol.volume_image_metadata)
+
return d