-rw-r--r--  HACKING.rst | 1
-rwxr-xr-x  bin/nova-rootwrap | 14
-rw-r--r--  doc/source/devref/aggregates.rst | 2
-rw-r--r--  etc/nova/nova.conf.sample | 10
-rw-r--r--  etc/nova/rootwrap.d/baremetal-compute-pxe.filters | 11
-rw-r--r--  nova/api/ec2/__init__.py | 8
-rw-r--r--  nova/api/ec2/cloud.py | 14
-rw-r--r--  nova/api/ec2/ec2utils.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/admin_actions.py | 2
-rw-r--r--  nova/compute/api.py | 48
-rw-r--r--  nova/compute/manager.py | 8
-rw-r--r--  nova/compute/utils.py | 4
-rw-r--r--  nova/conductor/api.py | 6
-rw-r--r--  nova/conductor/manager.py | 7
-rw-r--r--  nova/conductor/rpcapi.py | 6
-rw-r--r--  nova/db/api.py | 13
-rw-r--r--  nova/db/sqlalchemy/api.py | 88
-rw-r--r--  nova/network/manager.py | 5
-rw-r--r--  nova/network/quantumv2/api.py | 55
-rw-r--r--  nova/openstack/common/eventlet_backdoor.py | 2
-rw-r--r--  nova/openstack/common/rootwrap/__init__.py (renamed from nova/rootwrap/__init__.py) | 0
-rw-r--r--  nova/openstack/common/rootwrap/filters.py (renamed from nova/rootwrap/filters.py) | 22
-rw-r--r--  nova/openstack/common/rootwrap/wrapper.py (renamed from nova/rootwrap/wrapper.py) | 6
-rw-r--r--  nova/openstack/common/rpc/impl_kombu.py | 2
-rw-r--r--  nova/service.py | 12
-rw-r--r--  nova/servicegroup/api.py | 12
-rw-r--r--  nova/servicegroup/drivers/db.py | 29
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 4
-rw-r--r--  nova/tests/baremetal/test_pxe.py | 35
-rw-r--r--  nova/tests/compute/test_compute.py | 61
-rw-r--r--  nova/tests/compute/test_compute_utils.py | 3
-rw-r--r--  nova/tests/conductor/test_conductor.py | 8
-rw-r--r--  nova/tests/integrated/test_api_samples.py | 4
-rw-r--r--  nova/tests/network/test_quantumv2.py | 48
-rw-r--r--  nova/tests/test_api.py | 5
-rw-r--r--  nova/tests/test_cinder.py | 14
-rw-r--r--  nova/tests/test_nova_rootwrap.py | 198
-rw-r--r--  nova/virt/baremetal/net-dhcp.ubuntu.template | 3
-rw-r--r--  nova/virt/baremetal/net-static.ubuntu.template | 3
-rw-r--r--  nova/virt/baremetal/pxe.py | 30
-rw-r--r--  nova/virt/baremetal/volume_driver.py | 2
-rw-r--r--  nova/virt/hyperv/volumeops.py | 2
-rw-r--r--  nova/virt/images.py | 2
-rw-r--r--  nova/virt/libvirt/driver.py | 2
-rw-r--r--  nova/virt/xenapi/agent.py | 9
-rw-r--r--  nova/virt/xenapi/pool_states.py | 10
-rw-r--r--  nova/volume/cinder.py | 4
-rw-r--r--  openstack-common.conf | 2
-rwxr-xr-x  run_tests.sh | 2
-rwxr-xr-x  tools/hacking.py | 88
-rw-r--r--  tools/test-requires | 3
-rw-r--r--  tox.ini | 4
52 files changed, 493 insertions, 442 deletions
diff --git a/HACKING.rst b/HACKING.rst
index be894f072..35493e55b 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -9,6 +9,7 @@ Nova Style Commandments
General
-------
- Put two newlines between top-level code (funcs, classes, etc)
+- Use only UNIX style newlines ("\n"), not Windows style ("\r\n")
- Put one newline between methods in classes and anywhere else
- Long lines should be wrapped in parentheses
in preference to using a backslash for line continuation.
diff --git a/bin/nova-rootwrap b/bin/nova-rootwrap
index c8e880d79..72a8c6309 100755
--- a/bin/nova-rootwrap
+++ b/bin/nova-rootwrap
@@ -16,20 +16,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Root wrapper for Nova
+"""Root wrapper for OpenStack services
- Filters which commands nova is allowed to run as another user.
+ Filters which commands a service is allowed to run as another user.
- To use this, you should set the following in nova.conf:
+ To use this with nova, you should set the following in nova.conf:
rootwrap_config=/etc/nova/rootwrap.conf
You also need to let the nova user run nova-rootwrap as root in sudoers:
nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
- To make allowed commands node-specific, your packaging should only
- install {compute,network,volume}.filters respectively on compute, network
- and volume nodes (i.e. nova-api nodes should not have any of those files
- installed).
+ Service packaging should deploy .filters files only on nodes where they are
+ needed, to avoid allowing more than is necessary.
"""
import ConfigParser
@@ -75,7 +73,7 @@ if __name__ == '__main__':
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
- from nova.rootwrap import wrapper
+ from nova.openstack.common.rootwrap import wrapper
# Load configuration
try:
diff --git a/doc/source/devref/aggregates.rst b/doc/source/devref/aggregates.rst
index 979179768..56d398717 100644
--- a/doc/source/devref/aggregates.rst
+++ b/doc/source/devref/aggregates.rst
@@ -23,7 +23,7 @@ Host aggregates can be regarded as a mechanism to further partition an availabil
Xen Pool Host Aggregates
===============
-Originally all aggregates were Xen resource pools, now a aggregate can be set up as a resource pool by giving the aggregate the correct key-value pair.
+Originally all aggregates were Xen resource pools, now an aggregate can be set up as a resource pool by giving the aggregate the correct key-value pair.
You can use aggregates for XenServer resource pools when you have multiple compute nodes installed (only XenServer/XCP via xenapi driver is currently supported), and you want to leverage the capabilities of the underlying hypervisor resource pools. For example, you want to enable VM live migration (i.e. VM migration within the pool) or enable host maintenance with zero-downtime for guest instances. Please, note that VM migration across pools (i.e. storage migration) is not yet supported in XenServer/XCP, but will be added when available. Bear in mind that the two migration techniques are not mutually exclusive and can be used in combination for a higher level of flexibility in your cloud management.
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 96118eb76..36a7b0d9c 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1756,7 +1756,7 @@
# value)
#hyperv_attaching_volume_retry_count=10
-# The seconds to wait between an volume attachment attempt
+# The seconds to wait between a volume attachment attempt
# (integer value)
#hyperv_wait_between_attach_retry=5
@@ -2282,6 +2282,10 @@
# value)
#cinder_http_retries=3
+# Allow to perform insecure SSL (https) requests to cinder
+# (boolean value)
+#cinder_api_insecure=false
+
[conductor]
@@ -2476,7 +2480,7 @@
#
# Do not set this out of dev/test environments. If a node does
-# not have an fixed PXE IP address, volumes are exported with
+# not have a fixed PXE IP address, volumes are exported with
# globally opened ACL (boolean value)
#use_unsafe_iscsi=false
@@ -2546,4 +2550,4 @@
#keymap=en-us
-# Total option count: 519
+# Total option count: 520
diff --git a/etc/nova/rootwrap.d/baremetal-compute-pxe.filters b/etc/nova/rootwrap.d/baremetal-compute-pxe.filters
deleted file mode 100644
index 35fa61723..000000000
--- a/etc/nova/rootwrap.d/baremetal-compute-pxe.filters
+++ /dev/null
@@ -1,11 +0,0 @@
-# nova-rootwrap command filters for compute nodes
-# This file should be owned by (and only-writeable by) the root user
-
-[Filters]
-
-# nova/virt/baremetal/pxe.py: 'dnsmasq', ...
-dnsmasq: CommandFilter, /usr/sbin/dnsmasq, root
-
-# nova/virt/baremetal/pxe.py: 'kill', '-TERM', str(dnsmasq_pid)
-kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -15, -TERM
-
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 7cd7e1c7d..85b87e3e5 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -511,7 +511,13 @@ class Executor(wsgi.Application):
except exception.KeyPairExists as ex:
LOG.debug(_('KeyPairExists raised: %s'), unicode(ex),
context=context)
- return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
+ code = 'InvalidKeyPair.Duplicate'
+ return ec2_error(req, request_id, code, unicode(ex))
+ except exception.InvalidKeypair as ex:
+ LOG.debug(_('InvalidKeypair raised: %s'), unicode(ex),
+ context=context)
+ code = 'InvalidKeyPair.Format'
+ return ec2_error(req, request_id, code, unicode(ex))
except exception.InvalidParameterValue as ex:
LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex),
context=context)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 414b2e969..31f486b81 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -434,7 +434,8 @@ class CloudController(object):
#If looking for non existent key pair
if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
- raise exception.EC2APIError(msg)
+ raise exception.KeypairNotFound(msg,
+ code="InvalidKeyPair.Duplicate")
result = []
for key_pair in key_pairs:
@@ -457,13 +458,7 @@ class CloudController(object):
key_name)
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
- raise exception.EC2APIError(msg)
- except exception.InvalidKeypair:
- msg = _("Keypair data is invalid")
- raise exception.EC2APIError(msg)
- except exception.KeyPairExists:
- msg = _("Key pair '%s' already exists.") % key_name
- raise exception.KeyPairExists(msg)
+ raise exception.EC2APIError(msg, code='ResourceLimitExceeded')
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint'],
'keyMaterial': keypair['private_key']}
@@ -486,9 +481,6 @@ class CloudController(object):
except exception.InvalidKeypair:
msg = _("Keypair data is invalid")
raise exception.EC2APIError(msg)
- except exception.KeyPairExists:
- msg = _("Key pair '%s' already exists.") % key_name
- raise exception.EC2APIError(msg)
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint']}
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index cfe0d7879..1ce880de4 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -178,7 +178,7 @@ def ec2_vol_id_to_uuid(ec2_id):
def is_ec2_timestamp_expired(request, expires=None):
- """Checks the timestamp or expiry time included in a EC2 request
+ """Checks the timestamp or expiry time included in an EC2 request
and returns true if the request is expired
"""
query_time = None
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index fa7836b37..1c053ea59 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -130,7 +130,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('resetNetwork')
def _reset_network(self, req, id, body):
- """Permit admins to reset networking on an server."""
+ """Permit admins to reset networking on a server."""
context = req.environ['nova.context']
authorize(context, 'resetNetwork')
try:
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 22ee82bbf..4b15a3e27 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -946,11 +946,10 @@ class API(base.Base):
if (old['vm_state'] != vm_states.SOFT_DELETED and
old['task_state'] not in (task_states.DELETING,
task_states.SOFT_DELETING)):
- reservations = QUOTAS.reserve(context,
- project_id=project_id,
- instances=-1,
- cores=-instance['vcpus'],
- ram=-instance['memory_mb'])
+ reservations = self._create_reservations(context,
+ old,
+ updated,
+ project_id)
if not host:
# Just update database, nothing else we can do
@@ -1026,6 +1025,45 @@ class API(base.Base):
reservations,
project_id=project_id)
+ def _create_reservations(self, context, old_instance, new_instance,
+ project_id):
+ instance_vcpus = old_instance['vcpus']
+ instance_memory_mb = old_instance['memory_mb']
+ # NOTE(wangpan): if the instance is resizing, and the resources
+ # are updated to new instance type, we should use
+ # the old instance type to create reservation.
+ # see https://bugs.launchpad.net/nova/+bug/1099729 for more details
+ if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_FINISH):
+ get_migration = self.db.migration_get_by_instance_and_status
+ try:
+ migration_ref = get_migration(context.elevated(),
+ old_instance['uuid'], 'post-migrating')
+ except exception.MigrationNotFoundByStatus:
+ migration_ref = None
+ if (migration_ref and
+ new_instance['instance_type_id'] ==
+ migration_ref['new_instance_type_id']):
+ old_inst_type_id = migration_ref['old_instance_type_id']
+ get_inst_type_by_id = instance_types.get_instance_type
+ try:
+ old_inst_type = get_inst_type_by_id(old_inst_type_id)
+ except exception.InstanceTypeNotFound:
+ LOG.warning(_("instance type %(old_inst_type_id)d "
+ "not found") % locals())
+ pass
+ else:
+ instance_vcpus = old_inst_type['vcpus']
+ instance_memory_mb = old_inst_type['memory_mb']
+ LOG.debug(_("going to delete a resizing instance"))
+
+ reservations = QUOTAS.reserve(context,
+ project_id=project_id,
+ instances=-1,
+ cores=-instance_vcpus,
+ ram=-instance_memory_mb)
+ return reservations
+
def _local_delete(self, context, instance, bdms):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 384866cbe..03c54a363 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -463,6 +463,11 @@ class ComputeManager(manager.SchedulerDependentManager):
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'resume guests'), instance=instance)
+ except Exception:
+ # NOTE(vish): The instance failed to resume, so we set the
+ # instance to error and attempt to continue.
+ LOG.warning(_('Failed to resume instance'), instance=instance)
+ self._set_instance_error_state(context, instance['uuid'])
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
@@ -2459,8 +2464,11 @@ class ComputeManager(manager.SchedulerDependentManager):
@lockutils.synchronized(instance['uuid'], 'nova-')
def do_reserve():
+ bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
+ context, instance)
result = compute_utils.get_device_name_for_instance(context,
instance,
+ bdms,
device)
# NOTE(vish): create bdm here to avoid race condition
values = {'instance_uuid': instance['uuid'],
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index f07346c6b..2b1286e16 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -71,7 +71,7 @@ def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
db.instance_fault_create(context, values)
-def get_device_name_for_instance(context, instance, device):
+def get_device_name_for_instance(context, instance, bdms, device):
"""Validates (or generates) a device name for instance.
If device is not set, it will generate a unique device appropriate
@@ -88,8 +88,6 @@ def get_device_name_for_instance(context, instance, device):
req_prefix, req_letters = block_device.match_device(device)
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=device)
- bdms = db.block_device_mapping_get_all_by_instance(context,
- instance['uuid'])
mappings = block_device.instance_block_mapping(instance, bdms)
try:
prefix = block_device.match_device(mappings['root'])[0]
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 31ee19601..138e72f70 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -285,6 +285,9 @@ class LocalAPI(object):
return self._manager.compute_node_update(context, node, values,
prune_stats)
+ def service_update(self, context, service, values):
+ return self._manager.service_update(context, service, values)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager."""
@@ -548,3 +551,6 @@ class API(object):
def compute_node_update(self, context, node, values, prune_stats=False):
return self.conductor_rpcapi.compute_node_update(context, node,
values, prune_stats)
+
+ def service_update(self, context, service, values):
+ return self.conductor_rpcapi.service_update(context, service, values)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 9b18d1e00..0ff2e1400 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD."""
- RPC_API_VERSION = '1.33'
+ RPC_API_VERSION = '1.34'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -310,3 +310,8 @@ class ConductorManager(manager.SchedulerDependentManager):
result = self.db.compute_node_update(context, node['id'], values,
prune_stats)
return jsonutils.to_primitive(result)
+
+ @rpc_common.client_exceptions(exception.ServiceNotFound)
+ def service_update(self, context, service, values):
+ svc = self.db.service_update(context, service['id'], values)
+ return jsonutils.to_primitive(svc)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 95e332840..6dc8aef04 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -66,6 +66,7 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.31 - Added migration_get_in_progress_by_host_and_node
1.32 - Added optional node to instance_get_all_by_host
1.33 - Added compute_node_create and compute_node_update
+ 1.34 - Added service_update
"""
BASE_RPC_API_VERSION = '1.0'
@@ -316,3 +317,8 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
msg = self.make_msg('compute_node_update', node=node_p, values=values,
prune_stats=prune_stats)
return self.call(context, msg, version='1.33')
+
+ def service_update(self, context, service, values):
+ service_p = jsonutils.to_primitive(service)
+ msg = self.make_msg('service_update', service=service_p, values=values)
+ return self.call(context, msg, version='1.34')
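(Illustrative sketch, not part of the patch: intended usage of the new 1.34 call. The DB servicegroup driver further down uses it in exactly this way, so service heartbeats flow through the conductor instead of direct database access.)

    # 'service_ref' is a service dict previously obtained via the conductor.
    values = {'report_count': service_ref['report_count'] + 1}
    service_ref = conductor_api.service_update(ctxt, service_ref, values)
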
diff --git a/nova/db/api.py b/nova/db/api.py
index 13873936b..d8a16c52d 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -759,12 +759,13 @@ def instance_info_cache_update(context, instance_uuid, values,
:param values: = dict containing column values to update
"""
rv = IMPL.instance_info_cache_update(context, instance_uuid, values)
- try:
- cells_rpcapi.CellsAPI().instance_info_cache_update_at_top(context,
- rv)
- except Exception:
- LOG.exception(_("Failed to notify cells of instance info cache "
- "update"))
+ if update_cells:
+ try:
+ cells_rpcapi.CellsAPI().instance_info_cache_update_at_top(
+ context, rv)
+ except Exception:
+ LOG.exception(_("Failed to notify cells of instance info "
+ "cache update"))
return rv
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 4b350e516..8f5487c32 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -448,6 +448,7 @@ def service_update(context, service_id, values):
service_ref = service_get(context, service_id, session=session)
service_ref.update(values)
service_ref.save(session=session)
+ return service_ref
###################
@@ -4738,49 +4739,44 @@ def _ec2_instance_get_query(context, session=None):
@require_admin_context
-def task_log_get(context, task_name, period_beginning,
- period_ending, host, state=None, session=None):
+def _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host=None, state=None, session=None):
query = model_query(context, models.TaskLog, session=session).\
filter_by(task_name=task_name).\
filter_by(period_beginning=period_beginning).\
- filter_by(period_ending=period_ending).\
- filter_by(host=host)
+ filter_by(period_ending=period_ending)
+ if host is not None:
+ query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
+ return query
+
- return query.first()
+@require_admin_context
+def task_log_get(context, task_name, period_beginning, period_ending, host,
+ state=None):
+ return _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host, state).first()
@require_admin_context
-def task_log_get_all(context, task_name, period_beginning,
- period_ending, host=None, state=None, session=None):
- query = model_query(context, models.TaskLog, session=session).\
- filter_by(task_name=task_name).\
- filter_by(period_beginning=period_beginning).\
- filter_by(period_ending=period_ending)
- if host is not None:
- query = query.filter_by(host=host)
- if state is not None:
- query = query.filter_by(state=state)
- return query.all()
+def task_log_get_all(context, task_name, period_beginning, period_ending,
+ host=None, state=None):
+ return _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host, state).all()
@require_admin_context
-def task_log_begin_task(context, task_name,
- period_beginning,
- period_ending,
- host,
- task_items=None,
- message=None,
- session=None):
- session = session or get_session()
+def task_log_begin_task(context, task_name, period_beginning, period_ending,
+ host, task_items=None, message=None):
+ # NOTE(boris-42): This method has a race condition and will be rewritten
+ # after bp/db-unique-keys implementation.
+ session = get_session()
with session.begin():
- task = task_log_get(context, task_name,
- period_beginning,
- period_ending,
- host,
- session=session)
- if task:
+ task_ref = _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host, session=session).\
+ first()
+ if task_ref:
#It's already run(ning)!
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
task = models.TaskLog()
@@ -4794,30 +4790,20 @@ def task_log_begin_task(context, task_name,
if task_items:
task.task_items = task_items
task.save(session=session)
- return task
@require_admin_context
-def task_log_end_task(context, task_name,
- period_beginning,
- period_ending,
- host,
- errors,
- message=None,
- session=None):
- session = session or get_session()
+def task_log_end_task(context, task_name, period_beginning, period_ending,
+ host, errors, message=None):
+ values = dict(state="DONE", errors=errors)
+ if message:
+ values["message"] = message
+
+ session = get_session()
with session.begin():
- task = task_log_get(context, task_name,
- period_beginning,
- period_ending,
- host,
- session=session)
- if not task:
+ rows = _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host, session=session).\
+ update(values)
+ if rows == 0:
#It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
- task.state = "DONE"
- if message:
- task.message = message
- task.errors = errors
- task.save(session=session)
- return task
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 91b1c92d7..e4a97f162 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -147,9 +147,6 @@ network_opts = [
cfg.BoolOpt('auto_assign_floating_ip',
default=False,
help='Autoassigning floating ip to VM'),
- cfg.StrOpt('network_host',
- default=socket.getfqdn(),
- help='Network host to use for ip allocation in flat modes'),
cfg.BoolOpt('fake_network',
default=False,
help='If passed, use fake network devices and addresses'),
@@ -482,7 +479,7 @@ class FloatingIP(object):
@wrap_check_policy
def deallocate_floating_ip(self, context, address,
affect_auto_assigned=False):
- """Returns an floating ip to the pool."""
+ """Returns a floating ip to the pool."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
# handle auto_assigned
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 78f5ad036..29e5e2f06 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -238,11 +238,62 @@ class API(base.Base):
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Add a fixed ip to the instance from specified network."""
- raise NotImplementedError()
+ search_opts = {'network_id': network_id}
+ data = quantumv2.get_client(context).list_subnets(**search_opts)
+ ipam_subnets = data.get('subnets', [])
+ if not ipam_subnets:
+ raise exception.NetworkNotFoundForInstance(
+ instance_id=instance['uuid'])
+
+ zone = 'compute:%s' % instance['availability_zone']
+ search_opts = {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'network_id': network_id}
+ data = quantumv2.get_client(context).list_ports(**search_opts)
+ ports = data['ports']
+ for p in ports:
+ fixed_ips = p['fixed_ips']
+ for subnet in ipam_subnets:
+ fixed_ip = {'subnet_id': subnet['id']}
+ fixed_ips.append(fixed_ip)
+ port_req_body = {'port': {'fixed_ips': fixed_ips}}
+ try:
+ quantumv2.get_client(context).update_port(p['id'],
+ port_req_body)
+ except Exception as ex:
+ msg = _("Unable to update port %(portid)s with"
+ " failure: %(exception)s")
+ LOG.debug(msg, {'portid': p['id'], 'exception': ex})
+ return
+ raise exception.NetworkNotFoundForInstance(
+ instance_id=instance['uuid'])
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed ip from the instance."""
- raise NotImplementedError()
+ zone = 'compute:%s' % instance['availability_zone']
+ search_opts = {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'fixed_ips': 'ip_address=%s' % address}
+ data = quantumv2.get_client(context).list_ports(**search_opts)
+ ports = data['ports']
+ for p in ports:
+ fixed_ips = p['fixed_ips']
+ new_fixed_ips = []
+ for fixed_ip in fixed_ips:
+ if fixed_ip['ip_address'] != address:
+ new_fixed_ips.append(fixed_ip)
+ port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
+ try:
+ quantumv2.get_client(context).update_port(p['id'],
+ port_req_body)
+ except Exception as ex:
+ msg = _("Unable to update port %(portid)s with"
+ " failure: %(exception)s")
+ LOG.debug(msg, {'portid': p['id'], 'exception': ex})
+ return
+
+ raise exception.FixedIpNotFoundForSpecificInstance(
+ instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks):
"""Validate that the tenant can use the requested networks."""
diff --git a/nova/openstack/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py
index f18e84f6d..118385427 100644
--- a/nova/openstack/common/eventlet_backdoor.py
+++ b/nova/openstack/common/eventlet_backdoor.py
@@ -46,7 +46,7 @@ def _find_objects(t):
def _print_greenthreads():
- for i, gt in enumerate(find_objects(greenlet.greenlet)):
+ for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
diff --git a/nova/rootwrap/__init__.py b/nova/openstack/common/rootwrap/__init__.py
index 671d3c173..671d3c173 100644
--- a/nova/rootwrap/__init__.py
+++ b/nova/openstack/common/rootwrap/__init__.py
diff --git a/nova/rootwrap/filters.py b/nova/openstack/common/rootwrap/filters.py
index 8958f1ba1..905bbabea 100644
--- a/nova/rootwrap/filters.py
+++ b/nova/openstack/common/rootwrap/filters.py
@@ -20,7 +20,7 @@ import re
class CommandFilter(object):
- """Command filter only checking that the 1st argument matches exec_path."""
+ """Command filter only checking that the 1st argument matches exec_path"""
def __init__(self, exec_path, run_as, *args):
self.name = ''
@@ -30,7 +30,7 @@ class CommandFilter(object):
self.real_exec = None
def get_exec(self, exec_dirs=[]):
- """Returns existing executable, or empty string if none found."""
+ """Returns existing executable, or empty string if none found"""
if self.real_exec is not None:
return self.real_exec
self.real_exec = ""
@@ -46,7 +46,7 @@ class CommandFilter(object):
return self.real_exec
def match(self, userargs):
- """Only check that the first argument (command) matches exec_path."""
+ """Only check that the first argument (command) matches exec_path"""
if (os.path.basename(self.exec_path) == userargs[0]):
return True
return False
@@ -60,12 +60,12 @@ class CommandFilter(object):
return [to_exec] + userargs[1:]
def get_environment(self, userargs):
- """Returns specific environment to set, None if none."""
+ """Returns specific environment to set, None if none"""
return None
class RegExpFilter(CommandFilter):
- """Command filter doing regexp matching for every argument."""
+ """Command filter doing regexp matching for every argument"""
def match(self, userargs):
# Early skip if command or number of args don't match
@@ -89,15 +89,15 @@ class RegExpFilter(CommandFilter):
class DnsmasqFilter(CommandFilter):
- """Specific filter for the dnsmasq call (which includes env)."""
+ """Specific filter for the dnsmasq call (which includes env)"""
CONFIG_FILE_ARG = 'CONFIG_FILE'
def match(self, userargs):
if (userargs[0] == 'env' and
- userargs[1].startswith(self.CONFIG_FILE_ARG) and
- userargs[2].startswith('NETWORK_ID=') and
- userargs[3] == 'dnsmasq'):
+ userargs[1].startswith(self.CONFIG_FILE_ARG) and
+ userargs[2].startswith('NETWORK_ID=') and
+ userargs[3] == 'dnsmasq'):
return True
return False
@@ -114,7 +114,7 @@ class DnsmasqFilter(CommandFilter):
class DeprecatedDnsmasqFilter(DnsmasqFilter):
- """Variant of dnsmasq filter to support old-style FLAGFILE."""
+ """Variant of dnsmasq filter to support old-style FLAGFILE"""
CONFIG_FILE_ARG = 'FLAGFILE'
@@ -164,7 +164,7 @@ class KillFilter(CommandFilter):
class ReadFileFilter(CommandFilter):
- """Specific filter for the utils.read_file_as_root call."""
+ """Specific filter for the utils.read_file_as_root call"""
def __init__(self, file_path, *args):
self.file_path = file_path
diff --git a/nova/rootwrap/wrapper.py b/nova/openstack/common/rootwrap/wrapper.py
index 70bd63c47..4452177fe 100644
--- a/nova/rootwrap/wrapper.py
+++ b/nova/openstack/common/rootwrap/wrapper.py
@@ -22,7 +22,7 @@ import logging.handlers
import os
import string
-from nova.rootwrap import filters
+from nova.openstack.common.rootwrap import filters
class NoFilterMatched(Exception):
@@ -93,7 +93,7 @@ def setup_syslog(execname, facility, level):
def build_filter(class_name, *args):
- """Returns a filter object of class class_name."""
+ """Returns a filter object of class class_name"""
if not hasattr(filters, class_name):
logging.warning("Skipping unknown filter class (%s) specified "
"in filter definitions" % class_name)
@@ -103,7 +103,7 @@ def build_filter(class_name, *args):
def load_filters(filters_path):
- """Load filters from a list of directories."""
+ """Load filters from a list of directories"""
filterlist = []
for filterdir in filters_path:
if not os.path.isdir(filterdir):
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index bf38201f5..305dc7877 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -175,7 +175,7 @@ class ConsumerBase(object):
try:
self.queue.cancel(self.tag)
except KeyError, e:
- # NOTE(comstud): Kludge to get around a amqplib bug
+ # NOTE(comstud): Kludge to get around an amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
diff --git a/nova/service.py b/nova/service.py
index 0fde14baa..df8cf020f 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -411,7 +411,7 @@ class Service(object):
self.db_allowed = db_allowed
self.conductor_api = conductor.API(use_local=db_allowed)
self.conductor_api.wait_until_ready(context.get_admin_context())
- self.servicegroup_api = servicegroup.API()
+ self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
def start(self):
verstr = version.version_string_with_package()
@@ -421,12 +421,11 @@ class Service(object):
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
- service_ref = self.conductor_api.service_get_by_args(ctxt,
- self.host,
- self.binary)
- self.service_id = service_ref['id']
+ self.service_ref = self.conductor_api.service_get_by_args(ctxt,
+ self.host, self.binary)
+ self.service_id = self.service_ref['id']
except exception.NotFound:
- self._create_service_ref(ctxt)
+ self.service_ref = self._create_service_ref(ctxt)
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
@@ -479,6 +478,7 @@ class Service(object):
}
service = self.conductor_api.service_create(context, svc_values)
self.service_id = service['id']
+ return service
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index ebd0ee6ac..358b7dcbc 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -45,6 +45,15 @@ class API(object):
@lockutils.synchronized('nova.servicegroup.api.new', 'nova-')
def __new__(cls, *args, **kwargs):
+ '''Create an instance of the servicegroup API.
+
+ args and kwargs are passed down to the servicegroup driver when it gets
+ created. No args currently exist, though. Valid kwargs are:
+
+ db_allowed - Boolean. False if direct db access is not allowed and
+ alternative data access (conductor) should be used
+ instead.
+ '''
if not cls._driver:
LOG.debug(_('ServiceGroup driver defined as an instance of %s'),
@@ -55,7 +64,8 @@ class API(object):
except KeyError:
raise TypeError(_("unknown ServiceGroup driver name: %s")
% driver_name)
- cls._driver = importutils.import_object(driver_class)
+ cls._driver = importutils.import_object(driver_class,
+ *args, **kwargs)
utils.check_isinstance(cls._driver, ServiceGroupDriver)
# we don't have to check that cls._driver is not NONE,
# check_isinstance does it
diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py
index 075db3ed8..686ee728b 100644
--- a/nova/servicegroup/drivers/db.py
+++ b/nova/servicegroup/drivers/db.py
@@ -14,8 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from nova import conductor
from nova import context
-from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -32,6 +32,10 @@ LOG = logging.getLogger(__name__)
class DbDriver(api.ServiceGroupDriver):
+ def __init__(self, *args, **kwargs):
+ self.db_allowed = kwargs.get('db_allowed', True)
+ self.conductor_api = conductor.API(use_local=self.db_allowed)
+
def join(self, member_id, group_id, service=None):
"""Join the given service with it's group."""
@@ -53,6 +57,11 @@ class DbDriver(api.ServiceGroupDriver):
Check whether a service is up based on last heartbeat.
"""
last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
+ if isinstance(last_heartbeat, basestring):
+ # NOTE(russellb) If this service_ref came in over rpc via
+ # conductor, then the timestamp will be a string and needs to be
+ # converted back to a datetime.
+ last_heartbeat = timeutils.parse_strtime(last_heartbeat)
# Timestamps in DB are UTC.
elapsed = utils.total_seconds(timeutils.utcnow() - last_heartbeat)
LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s',
@@ -66,7 +75,8 @@ class DbDriver(api.ServiceGroupDriver):
LOG.debug(_('DB_Driver: get_all members of the %s group') % group_id)
rs = []
ctxt = context.get_admin_context()
- for service in db.service_get_all_by_topic(ctxt, group_id):
+ services = self.conductor_api.service_get_all_by_topic(ctxt, group_id)
+ for service in services:
if self.is_up(service):
rs.append(service['host'])
return rs
@@ -76,18 +86,11 @@ class DbDriver(api.ServiceGroupDriver):
ctxt = context.get_admin_context()
state_catalog = {}
try:
- try:
- service_ref = db.service_get(ctxt, service.service_id)
- except exception.NotFound:
- LOG.debug(_('The service database object disappeared, '
- 'Recreating it.'))
- service._create_service_ref(ctxt)
- service_ref = db.service_get(ctxt, service.service_id)
-
- state_catalog['report_count'] = service_ref['report_count'] + 1
+ report_count = service.service_ref['report_count'] + 1
+ state_catalog['report_count'] = report_count
- db.service_update(ctxt,
- service.service_id, state_catalog)
+ service.service_ref = self.conductor_api.service_update(ctxt,
+ service.service_ref, state_catalog)
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
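(Illustrative sketch, not part of the patch: why the basestring check in is_up() is needed. A service record fetched through the conductor has been through jsonutils.to_primitive(), so its timestamps come back as serialized strings rather than datetime objects.)

    # Hypothetical round-trip for illustration only.
    service_ref = conductor_api.service_get_all_by_topic(ctxt, group_id)[0]
    last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
    if isinstance(last_heartbeat, basestring):
        # convert the serialized timestamp back into a datetime
        last_heartbeat = timeutils.parse_strtime(last_heartbeat)
    elapsed = utils.total_seconds(timeutils.utcnow() - last_heartbeat)
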
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 562473121..a00dceff1 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -1440,7 +1440,7 @@ class CloudTestCase(test.TestCase):
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
def test_describe_bad_key_pairs(self):
- self.assertRaises(exception.EC2APIError,
+ self.assertRaises(exception.KeypairNotFound,
self.cloud.describe_key_pairs, self.context,
key_name=['DoesNotExist'])
@@ -1490,7 +1490,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(result['keyName'], key_name)
for key_name in bad_names:
- self.assertRaises(exception.EC2APIError,
+ self.assertRaises(exception.InvalidKeypair,
self.cloud.create_key_pair,
self.context,
key_name)
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
index 45c9ede43..73ef8caa3 100644
--- a/nova/tests/baremetal/test_pxe.py
+++ b/nova/tests/baremetal/test_pxe.py
@@ -147,12 +147,6 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
- self.assertIn('hwaddress ether fake', config)
- self.assertNotIn('hwaddress ether aa:bb:cc:dd', config)
-
- net[0][1]['mac'] = 'aa:bb:cc:dd'
- config = pxe.build_network_config(net)
- self.assertIn('hwaddress ether aa:bb:cc:dd', config)
net = utils.get_test_network_info(2)
config = pxe.build_network_config(net)
@@ -254,6 +248,13 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
pxe.get_tftp_image_info,
self.instance)
+ # Test that other non-true values also raise an exception
+ CONF.baremetal.deploy_kernel = ""
+ CONF.baremetal.deploy_ramdisk = ""
+ self.assertRaises(exception.NovaException,
+ pxe.get_tftp_image_info,
+ self.instance)
+
# Even if the instance includes kernel_id and ramdisk_id,
# we still need deploy_kernel_id and deploy_ramdisk_id.
# If those aren't present in instance[], and not specified in
@@ -295,6 +296,17 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
self.assertEqual(res['deploy_kernel'][0], 'eeee')
self.assertEqual(res['deploy_ramdisk'][0], 'ffff')
+ # However, if invalid values are passed on the image extra_specs,
+ # this should still raise an exception.
+ extra_specs = {
+ 'deploy_kernel_id': '',
+ 'deploy_ramdisk_id': '',
+ }
+ self.instance['extra_specs'] = extra_specs
+ self.assertRaises(exception.NovaException,
+ pxe.get_tftp_image_info,
+ self.instance)
+
class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
@@ -306,15 +318,6 @@ class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(macs, address_list)
- def test_generate_udev_rules(self):
- self._create_node()
- address_list = [nic['address'] for nic in self.nic_info]
- address_list.append(self.node_info['prov_mac_address'])
-
- rules = self.driver._generate_udev_rules(self.context, self.node)
- for address in address_list:
- self.assertIn('ATTR{address}=="%s"' % address, rules)
-
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
@@ -357,8 +360,6 @@ class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
# nova.virt.disk.api._inject_*_into_fs
self._create_node()
files = []
- files.append(('/etc/udev/rules.d/70-persistent-net.rules',
- self.driver._generate_udev_rules(self.context, self.node)))
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 691991f60..092fd940a 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -3306,6 +3306,35 @@ class ComputeTestCase(BaseTestCase):
self.mox.VerifyAll()
self.mox.UnsetStubs()
+ def test_init_instance_failed_resume_sets_error(self):
+ instance = {
+ 'uuid': 'fake-uuid',
+ 'info_cache': None,
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE,
+ }
+ self.flags(resume_guests_state_on_host_boot=True)
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'resume_state_on_host_boot')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_volume_block_device_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_set_instance_error_state')
+ self.compute._get_power_state(mox.IgnoreArg(),
+ instance).AndReturn(power_state.SHUTDOWN)
+ self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
+ self.compute._get_instance_volume_block_device_info(mox.IgnoreArg(),
+ instance['uuid']).AndReturn('fake-bdm')
+ self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
+ instance, mox.IgnoreArg(),
+ 'fake-bdm').AndRaise(test.TestingException)
+ self.compute._set_instance_error_state(mox.IgnoreArg(),
+ instance['uuid'])
+ self.mox.ReplayAll()
+ self.compute._init_instance('fake-context', instance)
+
def test_get_instances_on_driver(self):
fake_context = context.get_admin_context()
@@ -3958,6 +3987,38 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_delete_in_resizing(self):
+ def fake_quotas_reserve(context, expire=None, project_id=None,
+ **deltas):
+ old_type = instance_types.get_instance_type_by_name('m1.tiny')
+ # ensure using old instance type to create reservations
+ self.assertEqual(deltas['cores'], -old_type['vcpus'])
+ self.assertEqual(deltas['ram'], -old_type['memory_mb'])
+
+ self.stubs.Set(QUOTAS, 'reserve', fake_quotas_reserve)
+
+ instance, instance_uuid = self._run_instance(params={
+ 'host': CONF.host})
+
+ # create a fake migration record (manager does this)
+ new_inst_type = instance_types.get_instance_type_by_name('m1.small')
+ db.migration_create(self.context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'old_instance_type_id': instance['instance_type_id'],
+ 'new_instance_type_id': new_inst_type['id'],
+ 'status': 'post-migrating'})
+
+ # update instance type to resized one
+ db.instance_update(self.context, instance['uuid'],
+ {'instance_type_id': new_inst_type['id'],
+ 'vcpus': new_inst_type['vcpus'],
+ 'memory_mb': new_inst_type['memory_mb'],
+ 'task_state': task_states.RESIZE_FINISH})
+
+ self.compute_api.delete(self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_delete_in_resized(self):
instance, instance_uuid = self._run_instance(params={
'host': CONF.host})
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index f29c68627..6e7227d4c 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -69,8 +69,11 @@ class ComputeValidateDeviceTestCase(test.TestCase):
lambda context, instance: self.data)
def _validate_device(self, device=None):
+ bdms = db.block_device_mapping_get_all_by_instance(
+ self.context, self.instance['uuid'])
return compute_utils.get_device_name_for_instance(self.context,
self.instance,
+ bdms,
device)
@staticmethod
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index b29db92e7..d010b454f 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -747,6 +747,14 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
+ def test_service_update(self):
+ ctxt = self.context
+ self.mox.StubOutWithMock(db, 'service_update')
+ db.service_update(ctxt, '', {}).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.service_update(self.context, {'id': ''}, {})
+ self.assertEqual(result, 'fake-result')
+
def test_instance_get_all_by_host(self):
self._test_stubbed('instance_get_all_by_host',
self.context.elevated(), 'host')
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index aa41a8259..949f14177 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -672,7 +672,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('images-details-get-resp', subs, response)
def test_image_metadata_get(self):
- # Get api sample of a image metadata request.
+ # Get api sample of an image metadata request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s/metadata' % image_id)
subs = self._get_regexes()
@@ -701,7 +701,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
subs, response)
def test_image_meta_key_get(self):
- # Get api sample of a image metadata key request.
+ # Get api sample of an image metadata key request.
image_id = fake.get_valid_image_id()
key = "kernel_id"
response = self._do_get('images/%s/metadata/%s' % (image_id, key))
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 95b3a936b..c9b2e43b3 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -1002,6 +1002,54 @@ class TestQuantumv2(test.TestCase):
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, self.instance, address)
+ def test_add_fixed_ip_to_instance(self):
+ api = quantumapi.API()
+ network_id = 'my_netid1'
+ search_opts = {'network_id': network_id}
+ self.moxed_client.list_subnets(
+ **search_opts).AndReturn({'subnets': self.subnet_data1})
+
+ zone = 'compute:%s' % self.instance['availability_zone']
+ search_opts = {'device_id': self.instance['uuid'],
+ 'device_owner': 'compute:nova',
+ 'network_id': network_id}
+ self.moxed_client.list_ports(
+ **search_opts).AndReturn({'ports': self.port_data1})
+ port_req_body = {
+ 'port': {
+ 'fixed_ips': [{'subnet_id': 'my_subid1'}],
+ },
+ }
+ port = self.port_data1[0]
+ port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
+ self.moxed_client.update_port('my_portid1',
+ MyComparator(port_req_body)).AndReturn({'port': port})
+
+ self.mox.ReplayAll()
+ api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
+
+ def test_remove_fixed_ip_from_instance(self):
+ api = quantumapi.API()
+ address = '10.0.0.3'
+ zone = 'compute:%s' % self.instance['availability_zone']
+ search_opts = {'device_id': self.instance['uuid'],
+ 'device_owner': zone,
+ 'fixed_ips': 'ip_address=%s' % address}
+ self.moxed_client.list_ports(
+ **search_opts).AndReturn({'ports': self.port_data1})
+ port_req_body = {
+ 'port': {
+ 'fixed_ips': [],
+ },
+ }
+ port = self.port_data1[0]
+ port['fixed_ips'] = []
+ self.moxed_client.update_port('my_portid1',
+ MyComparator(port_req_body)).AndReturn({'port': port})
+
+ self.mox.ReplayAll()
+ api.remove_fixed_ip_from_instance(self.context, self.instance, address)
+
class TestQuantumv2ModuleMethods(test.TestCase):
def test_ensure_requested_network_ordering_no_preference(self):
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 829a98334..fb2e76e45 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -309,11 +309,10 @@ class ApiEc2TestCase(test.TestCase):
try:
self.ec2.create_key_pair('test')
except boto_exc.EC2ResponseError, e:
- if e.code == 'KeyPairExists':
+ if e.code == 'InvalidKeyPair.Duplicate':
pass
else:
- self.fail("Unexpected EC2ResponseError: %s "
- "(expected KeyPairExists)" % e.code)
+ self.assertEqual('InvalidKeyPair.Duplicate', e.code)
else:
self.fail('Exception not raised.')
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index 29e2e978b..79b5ae66a 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -98,13 +98,14 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
class FakeCinderClient(cinder.cinder_client.Client):
def __init__(self, username, password, project_id=None, auth_url=None,
- retries=None):
+ insecure=False, retries=None):
super(FakeCinderClient, self).__init__(username, password,
project_id=project_id,
auth_url=auth_url,
+ insecure=insecure,
retries=retries)
self.client = FakeHTTPClient(username, password, project_id, auth_url,
- retries=retries)
+ insecure=insecure, retries=retries)
# keep a ref to the clients callstack for factory's assert_called
self.callstack = self.client.callstack = []
@@ -177,6 +178,15 @@ class CinderTestCase(test.TestCase):
self.assertTrue('volume_image_metadata' in volume)
self.assertEqual(volume['volume_image_metadata'], _image_metadata)
+ def test_cinder_api_insecure(self):
+ # The True/False negation is awkward, but better for the client
+ # to pass us insecure=True and we check verify_cert == False
+ self.flags(cinder_api_insecure=True)
+ volume = self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEquals(
+ self.fake_client_factory.client.client.verify_cert, False)
+
def test_cinder_http_retries(self):
retries = 42
self.flags(cinder_http_retries=retries)
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
deleted file mode 100644
index 1029e0c2c..000000000
--- a/nova/tests/test_nova_rootwrap.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import ConfigParser
-import logging
-import logging.handlers
-import os
-import subprocess
-
-from nova.rootwrap import filters
-from nova.rootwrap import wrapper
-from nova import test
-
-
-class RootwrapTestCase(test.TestCase):
-
- def setUp(self):
- super(RootwrapTestCase, self).setUp()
- self.filters = [
- filters.RegExpFilter("/bin/ls", "root", 'ls', '/[a-z]+'),
- filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"),
- filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'),
- filters.CommandFilter("/nonexistent/cat", "root"),
- filters.CommandFilter("/bin/cat", "root") # Keep this one last
- ]
-
- def test_RegExpFilter_match(self):
- usercmd = ["ls", "/root"]
- filtermatch = wrapper.match_filter(self.filters, usercmd)
- self.assertFalse(filtermatch is None)
- self.assertEqual(filtermatch.get_command(usercmd),
- ["/bin/ls", "/root"])
-
- def test_RegExpFilter_reject(self):
- usercmd = ["ls", "root"]
- self.assertRaises(wrapper.NoFilterMatched,
- wrapper.match_filter, self.filters, usercmd)
-
- def test_missing_command(self):
- valid_but_missing = ["foo_bar_not_exist"]
- invalid = ["foo_bar_not_exist_and_not_matched"]
- self.assertRaises(wrapper.FilterMatchNotExecutable,
- wrapper.match_filter, self.filters, valid_but_missing)
- self.assertRaises(wrapper.NoFilterMatched,
- wrapper.match_filter, self.filters, invalid)
-
- def _test_DnsmasqFilter(self, filter_class, config_file_arg):
- usercmd = ['env', config_file_arg + '=A', 'NETWORK_ID=foobar',
- 'dnsmasq', 'foo']
- f = filter_class("/usr/bin/dnsmasq", "root")
- self.assertTrue(f.match(usercmd))
- self.assertEqual(f.get_command(usercmd), ['/usr/bin/dnsmasq', 'foo'])
- env = f.get_environment(usercmd)
- self.assertEqual(env.get(config_file_arg), 'A')
- self.assertEqual(env.get('NETWORK_ID'), 'foobar')
-
- def test_DnsmasqFilter(self):
- self._test_DnsmasqFilter(filters.DnsmasqFilter, 'CONFIG_FILE')
-
- def test_DeprecatedDnsmasqFilter(self):
- self._test_DnsmasqFilter(filters.DeprecatedDnsmasqFilter, 'FLAGFILE')
-
- def test_KillFilter(self):
- if not os.path.exists("/proc/%d" % os.getpid()):
- self.skipTest("Test requires /proc filesystem (procfs)")
- p = subprocess.Popen(["cat"], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- try:
- f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP")
- f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP")
- usercmd = ['kill', '-ALRM', p.pid]
- # Incorrect signal should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', p.pid]
- # Providing no signal should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- # Providing matching signal should be allowed
- usercmd = ['kill', '-9', p.pid]
- self.assertTrue(f.match(usercmd) or f2.match(usercmd))
-
- f = filters.KillFilter("root", "/bin/cat")
- f2 = filters.KillFilter("root", "/usr/bin/cat")
- usercmd = ['kill', os.getpid()]
- # Our own PID does not match /bin/sleep, so it should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', 999999]
- # Nonexistent PID should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', p.pid]
- # Providing no signal should work
- self.assertTrue(f.match(usercmd) or f2.match(usercmd))
- finally:
- # Terminate the "cat" process and wait for it to finish
- p.terminate()
- p.wait()
-
- def test_KillFilter_no_raise(self):
- # Makes sure ValueError from bug 926412 is gone.
- f = filters.KillFilter("root", "")
- # Providing anything other than kill should be False
- usercmd = ['notkill', 999999]
- self.assertFalse(f.match(usercmd))
- # Providing something that is not a pid should be False
- usercmd = ['kill', 'notapid']
- self.assertFalse(f.match(usercmd))
-
- def test_KillFilter_deleted_exe(self):
- # Makes sure deleted exe's are killed correctly.
- # See bug #967931.
- def fake_readlink(blah):
- return '/bin/commandddddd (deleted)'
-
- f = filters.KillFilter("root", "/bin/commandddddd")
- usercmd = ['kill', 1234]
- # Providing no signal should work
- self.stubs.Set(os, 'readlink', fake_readlink)
- self.assertTrue(f.match(usercmd))
-
- def test_ReadFileFilter(self):
- goodfn = '/good/file.name'
- f = filters.ReadFileFilter(goodfn)
- usercmd = ['cat', '/bad/file']
- self.assertFalse(f.match(['cat', '/bad/file']))
- usercmd = ['cat', goodfn]
- self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn])
- self.assertTrue(f.match(usercmd))
-
- def test_exec_dirs_search(self):
- # This test supposes you have /bin/cat or /usr/bin/cat locally
- f = filters.CommandFilter("cat", "root")
- usercmd = ['cat', '/f']
- self.assertTrue(f.match(usercmd))
- self.assertTrue(f.get_command(usercmd, exec_dirs=['/bin',
- '/usr/bin']) in (['/bin/cat', '/f'], ['/usr/bin/cat', '/f']))
-
- def test_skips(self):
- # Check that all filters are skipped and that the last matches
- usercmd = ["cat", "/"]
- filtermatch = wrapper.match_filter(self.filters, usercmd)
- self.assertTrue(filtermatch is self.filters[-1])
-
- def test_RootwrapConfig(self):
- raw = ConfigParser.RawConfigParser()
-
- # Empty config should raise ConfigParser.Error
- self.assertRaises(ConfigParser.Error, wrapper.RootwrapConfig, raw)
-
- # Check default values
- raw.set('DEFAULT', 'filters_path', '/a,/b')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.filters_path, ['/a', '/b'])
- self.assertEqual(config.exec_dirs, os.environ["PATH"].split(':'))
- self.assertFalse(config.use_syslog)
- self.assertEqual(config.syslog_log_facility,
- logging.handlers.SysLogHandler.LOG_SYSLOG)
- self.assertEqual(config.syslog_log_level, logging.ERROR)
-
- # Check general values
- raw.set('DEFAULT', 'exec_dirs', '/a,/x')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.exec_dirs, ['/a', '/x'])
-
- raw.set('DEFAULT', 'use_syslog', 'oui')
- self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
- raw.set('DEFAULT', 'use_syslog', 'true')
- config = wrapper.RootwrapConfig(raw)
- self.assertTrue(config.use_syslog)
-
- raw.set('DEFAULT', 'syslog_log_facility', 'moo')
- self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
- raw.set('DEFAULT', 'syslog_log_facility', 'local0')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.syslog_log_facility,
- logging.handlers.SysLogHandler.LOG_LOCAL0)
- raw.set('DEFAULT', 'syslog_log_facility', 'LOG_AUTH')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.syslog_log_facility,
- logging.handlers.SysLogHandler.LOG_AUTH)
-
- raw.set('DEFAULT', 'syslog_log_level', 'bar')
- self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
- raw.set('DEFAULT', 'syslog_log_level', 'INFO')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.syslog_log_level, logging.INFO)
diff --git a/nova/virt/baremetal/net-dhcp.ubuntu.template b/nova/virt/baremetal/net-dhcp.ubuntu.template
index e8824a88d..34a9e8be7 100644
--- a/nova/virt/baremetal/net-dhcp.ubuntu.template
+++ b/nova/virt/baremetal/net-dhcp.ubuntu.template
@@ -10,9 +10,6 @@ iface lo inet loopback
#for $ifc in $interfaces
auto ${ifc.name}
iface ${ifc.name} inet dhcp
-#if $ifc.hwaddress
- hwaddress ether ${ifc.hwaddress}
-#end if
#if $use_ipv6
iface ${ifc.name} inet6 dhcp
diff --git a/nova/virt/baremetal/net-static.ubuntu.template b/nova/virt/baremetal/net-static.ubuntu.template
index f14f0ce8c..1fe5a1ab8 100644
--- a/nova/virt/baremetal/net-static.ubuntu.template
+++ b/nova/virt/baremetal/net-static.ubuntu.template
@@ -16,9 +16,6 @@ iface ${ifc.name} inet static
#if $ifc.dns
dns-nameservers ${ifc.dns}
#end if
-#if $ifc.hwaddress
- hwaddress ether ${ifc.hwaddress}
-#end if
#if $use_ipv6
iface ${ifc.name} inet6 static
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index b94ac9032..0daac1d46 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -121,7 +121,6 @@ def build_network_config(network_info):
gateway_v6 = mapping['gateway_v6']
interface = {
'name': 'eth%d' % id,
- 'hwaddress': mapping['mac'],
'address': mapping['ips'][0]['ip'],
'gateway': mapping['gateway'],
'netmask': mapping['ips'][0]['netmask'],
@@ -219,7 +218,7 @@ def get_tftp_image_info(instance):
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
- if uuid is None:
+ if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
@@ -238,27 +237,12 @@ class PXE(base.NodeDriver):
super(PXE, self).__init__()
def _collect_mac_addresses(self, context, node):
- macs = []
- macs.append(db.bm_node_get(context, node['id'])['prov_mac_address'])
+ macs = set()
+ macs.add(db.bm_node_get(context, node['id'])['prov_mac_address'])
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
- macs.append(nic['address'])
- macs.sort()
- return macs
-
- def _generate_udev_rules(self, context, node):
- # TODO(deva): fix assumption that device names begin with "eth"
- # and fix assumption of ordering
- macs = self._collect_mac_addresses(context, node)
- rules = ''
- for (i, mac) in enumerate(macs):
- rules += 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ' \
- 'ATTR{address}=="%(mac)s", ATTR{dev_id}=="0x0", ' \
- 'ATTR{type}=="1", KERNEL=="eth*", NAME="%(name)s"\n' \
- % {'mac': mac.lower(),
- 'name': 'eth%d' % i,
- }
- return rules
+ macs.add(nic['address'])
+ return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
@@ -330,9 +314,6 @@ class PXE(base.NodeDriver):
injected_files = []
net_config = build_network_config(network_info)
- udev_rules = self._generate_udev_rules(context, node)
- injected_files.append(
- ('/etc/udev/rules.d/70-persistent-net.rules', udev_rules))
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
@@ -385,7 +366,6 @@ class PXE(base.NodeDriver):
config
./pxelinux.cfg/
{mac} -> ../{uuid}/config
-
"""
image_info = get_tftp_image_info(instance)
(root_mb, swap_mb) = get_partition_sizes(instance)
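The _collect_mac_addresses() hunk above swaps the append/sort list for a set, so a MAC that appears both as the node's provisioning address and as a registered NIC is only reported once, while sorted() keeps the deterministic ordering the old macs.sort() call provided; the generated udev rules and the hwaddress stanzas in the Ubuntu network templates are dropped alongside it. A tiny illustrative snippet (the addresses are made up):

    # Duplicate provisioning/NIC MACs collapse to a single entry.
    prov_mac = '52:54:00:aa:bb:cc'
    nic_addresses = ['52:54:00:aa:bb:cc', '52:54:00:dd:ee:ff', None]

    macs = set()
    macs.add(prov_mac)
    for address in nic_addresses:
        if address:          # NICs without an address are skipped, as above
            macs.add(address)

    print sorted(macs)
    # ['52:54:00:aa:bb:cc', '52:54:00:dd:ee:ff']
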
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index 2e6f82b93..0a05dfedd 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -31,7 +31,7 @@ opts = [
cfg.BoolOpt('use_unsafe_iscsi',
default=False,
help='Do not set this out of dev/test environments. '
- 'If a node does not have an fixed PXE IP address, '
+ 'If a node does not have a fixed PXE IP address, '
'volumes are exported with globally opened ACL'),
cfg.StrOpt('iscsi_iqn_prefix',
default='iqn.2010-10.org.openstack.baremetal',
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 200236233..192d6834c 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -37,7 +37,7 @@ hyper_volumeops_opts = [
help='The number of times we retry on attaching volume '),
cfg.IntOpt('hyperv_wait_between_attach_retry',
default=5,
- help='The seconds to wait between an volume attachment attempt'),
+        help='Seconds to wait between volume attachment attempts'),
cfg.BoolOpt('force_volumeutils_v1',
default=False,
help='Force volumeutils v1'),
diff --git a/nova/virt/images.py b/nova/virt/images.py
index f80c19999..9788a2b42 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -175,7 +175,7 @@ class QemuImgInfo(object):
def qemu_img_info(path):
- """Return a object containing the parsed output from qemu-img info."""
+ """Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path):
return QemuImgInfo()
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 597aa39a0..115c6cd02 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1995,7 +1995,7 @@ class LibvirtDriver(driver.ComputeDriver):
def get_interfaces(self, xml):
"""
- Note that this function takes an domain xml.
+ Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 61cfa9631..ef08edbc1 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -21,6 +21,9 @@ import os
import time
import uuid
+from nova.api.metadata import password
+from nova import context
+from nova import crypto
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -207,6 +210,12 @@ class XenAPIBasedAgent(object):
LOG.error(msg, instance=self.instance)
raise Exception(msg)
+ sshkey = self.instance.get('key_data')
+ if sshkey:
+ enc = crypto.ssh_encrypt_text(sshkey, new_pass)
+ password.set_password(context.get_admin_context(),
+ self.instance['uuid'], base64.b64encode(enc))
+
return resp['message']
def inject_file(self, path, contents):
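The new block in the password-reset path means that, when an instance was booted with an SSH keypair, the freshly set admin password is also RSA-encrypted with that public key and pushed into the metadata password store, so only the holder of the matching private key can recover it later. A hedged sketch of the same flow with explanatory comments: instance and new_pass are placeholders, the helper name is hypothetical, and the nova imports are the ones this hunk itself adds (base64 is not added here, so agent.py presumably imports it already):

    import base64

    from nova.api.metadata import password
    from nova import context
    from nova import crypto


    def stash_password_for_keypair(instance, new_pass):
        # Hypothetical helper mirroring the hunk above, for illustration.
        sshkey = instance.get('key_data')   # SSH public key used at boot
        if not sshkey:
            return
        # Encrypt the plaintext password with the instance's public key...
        enc = crypto.ssh_encrypt_text(sshkey, new_pass)
        # ...and store it base64-encoded in the metadata password store,
        # keyed by the instance uuid, under an admin context.
        password.set_password(context.get_admin_context(),
                              instance['uuid'],
                              base64.b64encode(enc))
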
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index 5bf326117..138f84831 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -19,10 +19,10 @@
A pool may be 'created', in which case the admin has triggered its
creation, but the underlying hypervisor pool has not actually being set up
-yet. An pool may be 'changing', meaning that the underlying hypervisor
-pool is being setup. An pool may be 'active', in which case the underlying
-hypervisor pool is up and running. An pool may be 'dismissed' when it has
-no hosts and it has been deleted. An pool may be in 'error' in all other
+yet. A pool may be 'changing', meaning that the underlying hypervisor
+pool is being setup. A pool may be 'active', in which case the underlying
+hypervisor pool is up and running. A pool may be 'dismissed' when it has
+no hosts and it has been deleted. A pool may be in 'error' in all other
cases.
A 'created' pool becomes 'changing' during the first request of
adding a host. During a 'changing' status no other requests will be accepted;
@@ -34,7 +34,7 @@ All other operations (e.g. add/remove hosts) that succeed will keep the
pool in the 'active' state. If a number of continuous requests fail,
an 'active' pool goes into an 'error' state. To recover from such a state,
admin intervention is required. Currently an error state is irreversible,
-that is, in order to recover from it an pool must be deleted.
+that is, in order to recover from it a pool must be deleted.
"""
CREATED = 'created'
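Since the docstring above describes the pool lifecycle in prose, here is a compact reading aid for the transitions it spells out. The table is illustrative only and is not part of pool_states.py; the conditions are paraphrased from the docstring, not from code:

    # States as named in pool_states.py; transitions summarised from the
    # docstring above.
    TRANSITIONS = {
        'created': ['changing'],           # first add-host request
        'changing': ['active', 'error'],   # hypervisor pool set up, or failed
        'active': ['error', 'dismissed'],  # repeated failures, or deleted
                                           # once it has no hosts
        'error': [],                       # irreversible: the pool must be
                                           # deleted to recover
    }
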
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index fccdedac8..3e1ccc66b 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -48,6 +48,9 @@ cinder_opts = [
cfg.IntOpt('cinder_http_retries',
default=3,
help='Number of cinderclient retries on failed http calls'),
+ cfg.BoolOpt('cinder_api_insecure',
+ default=False,
+                help='Allow insecure SSL requests to cinder'),
]
CONF = cfg.CONF
@@ -88,6 +91,7 @@ def cinderclient(context):
context.auth_token,
project_id=context.project_id,
auth_url=url,
+ insecure=CONF.cinder_api_insecure,
retries=CONF.cinder_http_retries)
# noauth extracts user_id:project_id from auth_token
c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
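The cinder hunks add a cinder_api_insecure BoolOpt and hand it straight to the cinderclient constructor as insecure=, which disables SSL certificate verification against the cinder endpoint. As a reading aid, the oslo cfg pattern involved looks roughly like the sketch below; the option is already declared and registered in nova/volume/cinder.py, so this is not code to add anywhere:

    from nova.openstack.common import cfg

    cinder_opts = [
        cfg.BoolOpt('cinder_api_insecure',
                    default=False,
                    help='Allow insecure SSL requests to cinder'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(cinder_opts)

    # Operators opt in by setting "cinder_api_insecure = true" in the
    # [DEFAULT] section of nova.conf; after the config files are parsed,
    # cinderclient() reads the value back as CONF.cinder_api_insecure and
    # passes it through as insecure=..., skipping certificate verification.
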
diff --git a/openstack-common.conf b/openstack-common.conf
index ea33ab235..a0b14e651 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
+modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils
# The base module to hold the copy of openstack.common
base=nova
diff --git a/run_tests.sh b/run_tests.sh
index 3a579ca36..39176d78b 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -121,7 +121,7 @@ function run_pep8 {
srcfiles+=" setup.py"
# Until all these issues get fixed, ignore.
- ignore='--ignore=E12,E711,E721,E712'
+ ignore='--ignore=E12,E711,E721,E712,N403,N404'
# First run the hacking selftest, to make sure it's right
echo "Running hacking.py self test"
diff --git a/tools/hacking.py b/tools/hacking.py
index ed22956eb..56f6694bd 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -21,7 +21,6 @@
built on top of pep8.py
"""
-import fnmatch
import inspect
import logging
import os
@@ -46,16 +45,15 @@ logging.disable('LOG')
#N8xx git commit messages
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
-DOCSTRING_TRIPLE = ['"""', "'''"]
+START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"]
+END_DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
# Monkey patch broken excluded filter in pep8
# See https://github.com/jcrocholl/pep8/pull/111
def excluded(self, filename):
- """
- Check if options.exclude contains a pattern that matches filename.
- """
+ """Check if options.exclude contains a pattern that matches filename."""
basename = os.path.basename(filename)
return any((pep8.filename_match(filename, self.options.exclude,
default=False),
@@ -120,7 +118,7 @@ def nova_todo_format(physical_line):
pos2 = physical_line.find('#') # make sure it's a comment
# TODO(sdague): should be smarter on this test
this_test = physical_line.find('N101: #TODO fail')
- if (pos != pos1 and pos2 >= 0 and pos2 < pos and this_test == -1):
+ if pos != pos1 and pos2 >= 0 and pos2 < pos and this_test == -1:
return pos, "N101: Use TODO(NAME)"
@@ -187,7 +185,8 @@ def nova_import_module_only(logical_line):
# TODO(sdague) actually get these tests working
def importModuleCheck(mod, parent=None, added=False):
- """
+ """Import Module helper function.
+
If can't find module on first try, recursively check for relative
imports
"""
@@ -258,8 +257,7 @@ def nova_import_module_only(logical_line):
def nova_import_alphabetical(logical_line, blank_lines, previous_logical,
indent_level, previous_indent_level):
- r"""
- Check for imports in alphabetical order.
+ r"""Check for imports in alphabetical order.
nova HACKING guide recommendation for imports:
imports in human alphabetical order
@@ -294,6 +292,11 @@ def nova_import_no_db_in_virt(logical_line, filename):
yield (0, "N307: nova.db import not allowed in nova/virt/*")
+def in_docstring_position(previous_logical):
+ return (previous_logical.startswith("def ") or
+ previous_logical.startswith("class "))
+
+
def nova_docstring_start_space(physical_line, previous_logical):
r"""Check for docstring not start with space.
@@ -311,11 +314,10 @@ def nova_docstring_start_space(physical_line, previous_logical):
# it's important that we determine this is actually a docstring,
# and not a doc block used somewhere after the first line of a
# function def
- if (previous_logical.startswith("def ") or
- previous_logical.startswith("class ")):
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])
- if (pos != -1 and len(physical_line) > pos + 4):
- if (physical_line[pos + 3] == ' '):
+ if in_docstring_position(previous_logical):
+ pos = max([physical_line.find(i) for i in START_DOCSTRING_TRIPLE])
+ if pos != -1 and len(physical_line) > pos + 4:
+ if physical_line[pos + 3] == ' ':
return (pos, "N401: docstring should not start with"
" a space")
@@ -330,33 +332,63 @@ def nova_docstring_one_line(physical_line):
N402: '''This is not'''
N402: '''Bad punctuation,'''
"""
+ #TODO(jogo) make this apply to multi line docstrings as well
line = physical_line.lstrip()
if line.startswith('"') or line.startswith("'"):
- pos = max([line.find(i) for i in DOCSTRING_TRIPLE]) # start
- end = max([line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
+ pos = max([line.find(i) for i in START_DOCSTRING_TRIPLE]) # start
+ end = max([line[-4:-1] == i for i in END_DOCSTRING_TRIPLE]) # end
- if (pos != -1 and end and len(line) > pos + 4):
- if (line[-5] not in ['.', '?', '!']):
+ if pos != -1 and end and len(line) > pos + 4:
+ if line[-5] not in ['.', '?', '!']:
return pos, "N402: one line docstring needs punctuation."
-def nova_docstring_multiline_end(physical_line):
+def nova_docstring_multiline_end(physical_line, previous_logical):
r"""Check multi line docstring end.
nova HACKING guide recommendation for docstring:
Docstring should end on a new line
- Okay: '''\nfoo\nbar\n'''
- # This test is not triggered, don't think it's right, removing
- # the colon prevents it from running
- N403 '''\nfoo\nbar\n ''' \n\n
+ Okay: '''foobar\nfoo\nbar\n'''
+ N403: def foo():\n'''foobar\nfoo\nbar\n d'''\n\n
"""
- # TODO(sdague) actually get these tests working
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
- if (pos != -1 and len(physical_line) == pos):
- if (physical_line[pos + 3] == ' '):
- return (pos, "N403: multi line docstring end on new line")
+ if in_docstring_position(previous_logical):
+ pos = max(physical_line.find(i) for i in END_DOCSTRING_TRIPLE)
+ if pos != -1 and len(physical_line) == pos + 4:
+ if physical_line.strip() not in START_DOCSTRING_TRIPLE:
+ return (pos, "N403: multi line docstring end on new line")
+
+
+def nova_docstring_multiline_start(physical_line, previous_logical, tokens):
+ r"""Check multi line docstring start with summary.
+
+ nova HACKING guide recommendation for docstring:
+    Docstring should start with a one-line summary
+
+ Okay: '''foobar\nfoo\nbar\n'''
+ N404: def foo():\n'''\nfoo\nbar\n''' \n\n
+ """
+ if in_docstring_position(previous_logical):
+ pos = max([physical_line.find(i) for i in START_DOCSTRING_TRIPLE])
+ # start of docstring when len(tokens)==0
+ if len(tokens) == 0 and pos != -1 and len(physical_line) == pos + 4:
+ if physical_line.strip() in START_DOCSTRING_TRIPLE:
+ return (pos, "N404: multi line docstring "
+ "should start with a summary")
+
+
+def nova_no_cr(physical_line):
+ r"""Check that we only use newlines not cariage returns.
+
+ Okay: import os\nimport sys
+ # pep8 doesn't yet replace \r in strings, will work on an
+ # upstream fix
+ N901 import os\r\nimport sys
+ """
+ pos = physical_line.find('\r')
+ if pos != -1 and pos == (len(physical_line) - 2):
+ return (pos, "N901: Windows style line endings not allowed in code")
FORMAT_RE = re.compile("%(?:"
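The checks introduced above are easiest to read against concrete docstrings. The snippet below is illustrative input, not part of hacking.py; the expected codes follow from the Okay:/N403:/N404: doctest lines in the hunks (N901, the carriage-return check, cannot be demonstrated meaningfully in printed text):

    def fine():
        """Summary on the first line.

        Body text, with the closing quotes on a line of their own.
        """


    def trips_n404():
        """
        Opening quotes alone on the first line: no one-line summary (N404).
        """


    def trips_n403():
        """Summary is fine.

        Closing quotes share the last line with other text (N403)."""

Note that run_tests.sh and tox.ini add N403 and N404 to the pep8 ignore list for now, per the "Until all these issues get fixed, ignore" comment, so the new checks are not yet enforced on the existing tree.
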
diff --git a/tools/test-requires b/tools/test-requires
index 5f195d5c1..c691a6bca 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -12,4 +12,5 @@ pylint==0.25.2
python-subunit
sphinx>=1.1.2
testrepository>=0.0.13
-testtools>=0.9.22
+# testtools 0.9.25 is broken, change this when upstream is fixed (bug 1102400)
+testtools>=0.9.22,<=0.9.24
diff --git a/tox.ini b/tox.ini
index e3322e044..58468accb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,9 +18,9 @@ downloadcache = ~/cache/pip
deps=pep8==1.3.3
commands =
python tools/hacking.py --doctest
- python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \
+ python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
--exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
- python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \
+ python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
--filename=nova* bin
[testenv:pylint]