summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--HACKING.rst1
-rwxr-xr-xbin/nova-baremetal-deploy-helper5
-rwxr-xr-xbin/nova-novncproxy1
-rwxr-xr-xbin/nova-rootwrap14
-rwxr-xr-xbin/nova-spicehtml5proxy1
-rw-r--r--doc/api_samples/os-flavor-manage/flavor-create-post-req.json2
-rw-r--r--doc/source/devref/aggregates.rst4
-rw-r--r--etc/nova/api-paste.ini20
-rw-r--r--etc/nova/nova.conf.sample149
-rw-r--r--etc/nova/rootwrap.d/baremetal-compute-pxe.filters11
-rw-r--r--etc/nova/rootwrap.d/compute.filters2
-rw-r--r--nova/api/ec2/__init__.py8
-rw-r--r--nova/api/ec2/cloud.py14
-rw-r--r--nova/api/ec2/ec2utils.py5
-rw-r--r--nova/api/openstack/compute/contrib/admin_actions.py2
-rw-r--r--nova/api/openstack/compute/server_metadata.py9
-rw-r--r--nova/api/openstack/compute/servers.py27
-rw-r--r--nova/api/openstack/wsgi.py4
-rw-r--r--nova/availability_zones.py6
-rw-r--r--nova/compute/api.py61
-rw-r--r--nova/compute/manager.py39
-rw-r--r--nova/compute/utils.py9
-rw-r--r--nova/conductor/api.py16
-rw-r--r--nova/conductor/manager.py19
-rw-r--r--nova/conductor/rpcapi.py14
-rw-r--r--nova/crypto.py11
-rw-r--r--nova/db/api.py18
-rw-r--r--nova/db/sqlalchemy/api.py233
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/150_add_host_to_instance_faults.py36
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py52
-rw-r--r--nova/db/sqlalchemy/models.py5
-rw-r--r--nova/exception.py4
-rw-r--r--nova/image/glance.py20
-rw-r--r--nova/network/api.py2
-rw-r--r--nova/network/linux_net.py43
-rw-r--r--nova/network/manager.py39
-rw-r--r--nova/network/model.py2
-rw-r--r--nova/network/quantumv2/api.py80
-rw-r--r--nova/network/rpcapi.py6
-rw-r--r--nova/openstack/common/cfg.py76
-rw-r--r--nova/openstack/common/eventlet_backdoor.py2
-rw-r--r--nova/openstack/common/iniparser.py2
-rw-r--r--nova/openstack/common/lockutils.py6
-rw-r--r--nova/openstack/common/log.py100
-rw-r--r--nova/openstack/common/rootwrap/__init__.py (renamed from nova/rootwrap/__init__.py)0
-rw-r--r--nova/openstack/common/rootwrap/filters.py (renamed from nova/rootwrap/filters.py)22
-rw-r--r--nova/openstack/common/rootwrap/wrapper.py (renamed from nova/rootwrap/wrapper.py)6
-rw-r--r--nova/openstack/common/rpc/impl_kombu.py2
-rw-r--r--nova/scheduler/driver.py4
-rw-r--r--nova/scheduler/filters/trusted_filter.py141
-rw-r--r--nova/scheduler/manager.py4
-rw-r--r--nova/service.py12
-rw-r--r--nova/servicegroup/api.py12
-rw-r--r--nova/servicegroup/drivers/db.py29
-rw-r--r--nova/tests/api/ec2/test_cinder_cloud.py5
-rw-r--r--nova/tests/api/ec2/test_cloud.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py10
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ips.py18
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_services.py4
-rw-r--r--nova/tests/api/openstack/compute/test_limits.py2
-rw-r--r--nova/tests/api/openstack/compute/test_server_metadata.py62
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py74
-rw-r--r--nova/tests/baremetal/test_nova_baremetal_deploy_helper.py256
-rw-r--r--nova/tests/baremetal/test_pxe.py35
-rw-r--r--nova/tests/compute/test_compute.py143
-rw-r--r--nova/tests/compute/test_compute_utils.py3
-rw-r--r--nova/tests/conductor/test_conductor.py23
-rw-r--r--nova/tests/image/test_glance.py37
-rw-r--r--nova/tests/image/test_s3.py4
-rw-r--r--nova/tests/integrated/test_api_samples.py33
-rw-r--r--nova/tests/network/test_linux_net.py16
-rw-r--r--nova/tests/network/test_manager.py20
-rw-r--r--nova/tests/network/test_quantumv2.py115
-rw-r--r--nova/tests/network/test_rpcapi.py5
-rw-r--r--nova/tests/scheduler/test_chance_scheduler.py6
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py16
-rw-r--r--nova/tests/scheduler/test_host_filters.py98
-rw-r--r--nova/tests/scheduler/test_scheduler.py24
-rw-r--r--nova/tests/test_api.py5
-rw-r--r--nova/tests/test_availability_zones.py114
-rw-r--r--nova/tests/test_cinder.py14
-rw-r--r--nova/tests/test_db_api.py26
-rw-r--r--nova/tests/test_driver.py60
-rw-r--r--nova/tests/test_imagebackend.py2
-rw-r--r--nova/tests/test_imagecache.py4
-rw-r--r--nova/tests/test_instance_types.py61
-rw-r--r--nova/tests/test_instance_types_extra_specs.py8
-rw-r--r--nova/tests/test_libvirt.py68
-rw-r--r--nova/tests/test_migrations.py22
-rw-r--r--nova/tests/test_nova_rootwrap.py198
-rw-r--r--nova/tests/test_pipelib.py8
-rw-r--r--nova/tests/test_utils.py21
-rw-r--r--nova/tests/test_xenapi.py25
-rw-r--r--nova/tests/xenapi/stubs.py14
-rw-r--r--nova/utils.py8
-rw-r--r--nova/virt/baremetal/net-dhcp.ubuntu.template3
-rw-r--r--nova/virt/baremetal/net-static.ubuntu.template3
-rw-r--r--nova/virt/baremetal/pxe.py30
-rw-r--r--nova/virt/baremetal/volume_driver.py2
-rw-r--r--nova/virt/disk/api.py10
-rw-r--r--nova/virt/driver.py14
-rw-r--r--nova/virt/fake.py6
-rw-r--r--nova/virt/firewall.py2
-rw-r--r--nova/virt/hyperv/driver.py2
-rw-r--r--nova/virt/hyperv/volumeops.py4
-rw-r--r--nova/virt/images.py4
-rw-r--r--nova/virt/libvirt/driver.py100
-rw-r--r--nova/virt/libvirt/firewall.py4
-rw-r--r--nova/virt/libvirt/imagebackend.py2
-rw-r--r--nova/virt/libvirt/imagecache.py2
-rw-r--r--nova/virt/powervm/operator.py2
-rw-r--r--nova/virt/vmwareapi/network_util.py2
-rw-r--r--nova/virt/xenapi/agent.py9
-rw-r--r--nova/virt/xenapi/driver.py5
-rw-r--r--nova/virt/xenapi/pool_states.py10
-rw-r--r--nova/virt/xenapi/vm_utils.py2
-rw-r--r--nova/volume/cinder.py4
-rw-r--r--openstack-common.conf2
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/migration2
-rwxr-xr-xrun_tests.sh16
-rw-r--r--tools/conf/extract_opts.py35
-rwxr-xr-xtools/hacking.py88
-rw-r--r--tools/test-requires2
-rwxr-xr-xtools/xenserver/vm_vdi_cleaner.py1
-rw-r--r--tox.ini6
126 files changed, 2394 insertions, 1065 deletions
diff --git a/.gitignore b/.gitignore
index efb88c781..6028b8a44 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,3 +37,5 @@ nosetests.xml
nova/tests/cover/*
nova/vcsversion.py
tools/conf/nova.conf*
+tools/lintstack.head.py
+tools/pylint_exceptions
diff --git a/HACKING.rst b/HACKING.rst
index be894f072..35493e55b 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -9,6 +9,7 @@ Nova Style Commandments
General
-------
- Put two newlines between top-level code (funcs, classes, etc)
+- Use only UNIX style newlines ("\n"), not Windows style ("\r\n")
- Put one newline between methods in classes and anywhere else
- Long lines should be wrapped in parentheses
in preference to using a backslash for line continuation.
diff --git a/bin/nova-baremetal-deploy-helper b/bin/nova-baremetal-deploy-helper
index f8a487d37..894a42003 100755
--- a/bin/nova-baremetal-deploy-helper
+++ b/bin/nova-baremetal-deploy-helper
@@ -18,7 +18,10 @@
"""Starter script for Bare-Metal Deployment Service."""
import eventlet
-eventlet.monkey_patch()
+
+# Do not monkey_patch in unittest
+if __name__ == '__main__':
+ eventlet.monkey_patch()
import os
import sys
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index 8562acc53..477510b99 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -61,6 +61,7 @@ opts = [
CONF = cfg.CONF
CONF.register_cli_opts(opts)
+CONF.import_opt('debug', 'nova.openstack.common.log')
if __name__ == '__main__':
diff --git a/bin/nova-rootwrap b/bin/nova-rootwrap
index c8e880d79..72a8c6309 100755
--- a/bin/nova-rootwrap
+++ b/bin/nova-rootwrap
@@ -16,20 +16,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Root wrapper for Nova
+"""Root wrapper for OpenStack services
- Filters which commands nova is allowed to run as another user.
+ Filters which commands a service is allowed to run as another user.
- To use this, you should set the following in nova.conf:
+ To use this with nova, you should set the following in nova.conf:
rootwrap_config=/etc/nova/rootwrap.conf
You also need to let the nova user run nova-rootwrap as root in sudoers:
nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
- To make allowed commands node-specific, your packaging should only
- install {compute,network,volume}.filters respectively on compute, network
- and volume nodes (i.e. nova-api nodes should not have any of those files
- installed).
+ Service packaging should deploy .filters files only on nodes where they are
+ needed, to avoid allowing more than is necessary.
"""
import ConfigParser
@@ -75,7 +73,7 @@ if __name__ == '__main__':
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
- from nova.rootwrap import wrapper
+ from nova.openstack.common.rootwrap import wrapper
# Load configuration
try:
diff --git a/bin/nova-spicehtml5proxy b/bin/nova-spicehtml5proxy
index b1882bbea..089ff9d71 100755
--- a/bin/nova-spicehtml5proxy
+++ b/bin/nova-spicehtml5proxy
@@ -61,6 +61,7 @@ opts = [
CONF = cfg.CONF
CONF.register_cli_opts(opts)
+CONF.import_opt('debug', 'nova.openstack.common.log')
if __name__ == '__main__':
diff --git a/doc/api_samples/os-flavor-manage/flavor-create-post-req.json b/doc/api_samples/os-flavor-manage/flavor-create-post-req.json
index 8a3830f09..0c5914a01 100644
--- a/doc/api_samples/os-flavor-manage/flavor-create-post-req.json
+++ b/doc/api_samples/os-flavor-manage/flavor-create-post-req.json
@@ -4,6 +4,6 @@
"ram": 1024,
"vcpus": 2,
"disk": 10,
- "id": "10",
+ "id": "10"
}
}
diff --git a/doc/source/devref/aggregates.rst b/doc/source/devref/aggregates.rst
index 979179768..ecc6329ba 100644
--- a/doc/source/devref/aggregates.rst
+++ b/doc/source/devref/aggregates.rst
@@ -23,7 +23,7 @@ Host aggregates can be regarded as a mechanism to further partition an availabil
Xen Pool Host Aggregates
===============
-Originally all aggregates were Xen resource pools, now a aggregate can be set up as a resource pool by giving the aggregate the correct key-value pair.
+Originally all aggregates were Xen resource pools, now an aggregate can be set up as a resource pool by giving the aggregate the correct key-value pair.
You can use aggregates for XenServer resource pools when you have multiple compute nodes installed (only XenServer/XCP via xenapi driver is currently supported), and you want to leverage the capabilities of the underlying hypervisor resource pools. For example, you want to enable VM live migration (i.e. VM migration within the pool) or enable host maintenance with zero-downtime for guest instances. Please, note that VM migration across pools (i.e. storage migration) is not yet supported in XenServer/XCP, but will be added when available. Bear in mind that the two migration techniques are not mutually exclusive and can be used in combination for a higher level of flexibility in your cloud management.
@@ -65,7 +65,7 @@ Usage
* aggregate-add-host <id> <host> Add the host to the specified aggregate.
* aggregate-remove-host <id> <host> Remove the specified host from the specfied aggregate.
* aggregate-set-metadata <id> <key=value> [<key=value> ...] Update the metadata associated with the aggregate.
- * aggregate-update <id> <name> [<availability_zone>] Update the aggregate's name and optionally availablity zone.
+ * aggregate-update <id> <name> [<availability_zone>] Update the aggregate's name and optionally availability zone.
* host-list List all hosts by service
* host-update --maintenance [enable | disable] Put/resume host into/from maintenance.
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 85603fe59..08d59c521 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -62,23 +62,12 @@ use = call:nova.api.openstack.urlmap:urlmap_factory
/v1.1: openstack_compute_api_v2
/v2: openstack_compute_api_v2
-[composite:osapi_volume]
-use = call:nova.api.openstack.urlmap:urlmap_factory
-/: osvolumeversions
-/v1: openstack_volume_api_v1
-
[composite:openstack_compute_api_v2]
use = call:nova.api.auth:pipeline_factory
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
-[composite:openstack_volume_api_v1]
-use = call:nova.api.auth:pipeline_factory
-noauth = faultwrap sizelimit noauth ratelimit osapi_volume_app_v1
-keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_volume_app_v1
-keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_volume_app_v1
-
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
@@ -97,18 +86,9 @@ paste.app_factory = nova.api.openstack.compute:APIRouter.factory
[pipeline:oscomputeversions]
pipeline = faultwrap oscomputeversionapp
-[app:osapi_volume_app_v1]
-paste.app_factory = nova.api.openstack.volume:APIRouter.factory
-
[app:oscomputeversionapp]
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
-[pipeline:osvolumeversions]
-pipeline = faultwrap osvolumeversionapp
-
-[app:osvolumeversionapp]
-paste.app_factory = nova.api.openstack.volume.versions:Versions.factory
-
##########
# Shared #
##########
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 96118eb76..a5f945618 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1,47 +1,6 @@
[DEFAULT]
#
-# Options defined in nova.openstack.common.cfg:CommonConfigOpts
-#
-
-# Print debugging output (boolean value)
-#debug=false
-
-# Print more verbose output (boolean value)
-#verbose=false
-
-# If this option is specified, the logging configuration file
-# specified is used and overrides any other logging options
-# specified. Please see the Python logging module
-# documentation for details on logging configuration files.
-# (string value)
-#log_config=<None>
-
-# A logging.Formatter log message format string which may use
-# any of the available logging.LogRecord attributes. Default:
-# %(default)s (string value)
-#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
-
-# Format string for %%(asctime)s in log records. Default:
-# %(default)s (string value)
-#log_date_format=%Y-%m-%d %H:%M:%S
-
-# (Optional) Name of log file to output to. If not set,
-# logging will go to stdout. (string value)
-#log_file=<None>
-
-# (Optional) The directory to keep log files in (will be
-# prepended to --log-file) (string value)
-#log_dir=<None>
-
-# Use syslog for logging. (boolean value)
-#use_syslog=false
-
-# syslog facility to receive log lines (string value)
-#syslog_log_facility=LOG_USER
-
-
-#
# Options defined in nova.availability_zones
#
@@ -486,6 +445,22 @@
#
+# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks
+#
+
+# Enables or disables quotaing of tenant networks (boolean
+# value)
+#enable_network_quota=false
+
+# Control for checking for default networks (string value)
+#use_quantum_default_nets=False
+
+# Default tenant id when creating quantum networks (string
+# value)
+#quantum_default_tenant_id=default
+
+
+#
# Options defined in nova.api.openstack.compute.extensions
#
@@ -1123,10 +1098,6 @@
# Autoassigning floating ip to VM (boolean value)
#auto_assign_floating_ip=false
-# Network host to use for ip allocation in flat modes (string
-# value)
-#network_host=nova
-
# If passed, use fake network devices and addresses (boolean
# value)
#fake_network=false
@@ -1207,6 +1178,10 @@
# (string value)
#quantum_auth_strategy=keystone
+# Name of Integration Bridge used by Open vSwitch (string
+# value)
+#quantum_ovs_bridge=br-int
+
#
# Options defined in nova.network.rpcapi
@@ -1253,6 +1228,14 @@
# Options defined in nova.openstack.common.log
#
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
# Log output to standard error (boolean value)
#use_stderr=true
@@ -1262,11 +1245,11 @@
# format string to use for log messages with context (string
# value)
-#logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s
# format string to use for log messages without context
# (string value)
-#logging_default_format_string=%(asctime)s %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# data to append to log format when level is DEBUG (string
# value)
@@ -1274,7 +1257,7 @@
# prefix each line of exception output with this format
# (string value)
-#logging_exception_prefix=%(asctime)s %(process)d TRACE %(name)s %(instance)s
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# list of logger=LEVEL pairs (list value)
#default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN
@@ -1293,6 +1276,36 @@
# it like this (string value)
#instance_uuid_format="[instance: %(uuid)s] "
+# If this option is specified, the logging configuration file
+# specified is used and overrides any other logging options
+# specified. Please see the Python logging module
+# documentation for details on logging configuration files.
+# (string value)
+#log_config=<None>
+
+# A logging.Formatter log message format string which may use
+# any of the available logging.LogRecord attributes. Default:
+# %(default)s (string value)
+#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If not set,
+# logging will go to stdout. (string value)
+#log_file=<None>
+
+# (Optional) The directory to keep log files in (will be
+# prepended to --log-file) (string value)
+#log_dir=<None>
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+# syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
#
# Options defined in nova.openstack.common.notifier.api
@@ -1724,13 +1737,18 @@
#
-# Options defined in nova.virt.hyperv.vmops
+# Options defined in nova.virt.hyperv.vif
#
-# Default vSwitch Name, if none provided first external is
-# used (string value)
+# External virtual switch Name, if not provided, the first
+# external virtual switch is used (string value)
#vswitch_name=<None>
+
+#
+# Options defined in nova.virt.hyperv.vmops
+#
+
# Required for live migration among hosts with different CPU
# features (boolean value)
#limit_cpu_features=false
@@ -1756,7 +1774,7 @@
# value)
#hyperv_attaching_volume_retry_count=10
-# The seconds to wait between an volume attachment attempt
+# The seconds to wait between a volume attachment attempt
# (integer value)
#hyperv_wait_between_attach_retry=5
@@ -1985,26 +2003,26 @@
# Options defined in nova.virt.vmwareapi.driver
#
-# URL for connection to VMWare ESX host.Required if
-# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+# URL for connection to VMware ESX host.Required if
+# compute_driver is vmwareapi.VMwareESXDriver. (string value)
#vmwareapi_host_ip=<None>
-# Username for connection to VMWare ESX host. Used only if
-# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+# Username for connection to VMware ESX host. Used only if
+# compute_driver is vmwareapi.VMwareESXDriver. (string value)
#vmwareapi_host_username=<None>
-# Password for connection to VMWare ESX host. Used only if
-# compute_driver is vmwareapi.VMWareESXDriver. (string value)
+# Password for connection to VMware ESX host. Used only if
+# compute_driver is vmwareapi.VMwareESXDriver. (string value)
#vmwareapi_host_password=<None>
# The interval used for polling of remote tasks. Used only if
-# compute_driver is vmwareapi.VMWareESXDriver. (floating point
+# compute_driver is vmwareapi.VMwareESXDriver. (floating point
# value)
#vmwareapi_task_poll_interval=5.0
# The number of times we retry on failures, e.g., socket
# error, etc. Used only if compute_driver is
-# vmwareapi.VMWareESXDriver. (integer value)
+# vmwareapi.VMwareESXDriver. (integer value)
#vmwareapi_api_retry_count=10
@@ -2278,10 +2296,17 @@
# (string value)
#cinder_endpoint_template=<None>
+# region name of this node (string value)
+#os_region_name=<None>
+
# Number of cinderclient retries on failed http calls (integer
# value)
#cinder_http_retries=3
+# Allow to perform insecure SSL requests to cinder (boolean
+# value)
+#cinder_api_insecure=false
+
[conductor]
@@ -2476,7 +2501,7 @@
#
# Do not set this out of dev/test environments. If a node does
-# not have an fixed PXE IP address, volumes are exported with
+# not have a fixed PXE IP address, volumes are exported with
# globally opened ACL (boolean value)
#use_unsafe_iscsi=false
@@ -2546,4 +2571,4 @@
#keymap=en-us
-# Total option count: 519
+# Total option count: 525
diff --git a/etc/nova/rootwrap.d/baremetal-compute-pxe.filters b/etc/nova/rootwrap.d/baremetal-compute-pxe.filters
deleted file mode 100644
index 35fa61723..000000000
--- a/etc/nova/rootwrap.d/baremetal-compute-pxe.filters
+++ /dev/null
@@ -1,11 +0,0 @@
-# nova-rootwrap command filters for compute nodes
-# This file should be owned by (and only-writeable by) the root user
-
-[Filters]
-
-# nova/virt/baremetal/pxe.py: 'dnsmasq', ...
-dnsmasq: CommandFilter, /usr/sbin/dnsmasq, root
-
-# nova/virt/baremetal/pxe.py: 'kill', '-TERM', str(dnsmasq_pid)
-kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -15, -TERM
-
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index f344a1b1c..e1113a9e7 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -99,9 +99,11 @@ pygrub: CommandFilter, /usr/bin/pygrub, root
fdisk: CommandFilter, /sbin/fdisk, root
# nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
+# nova/virt/disk/api.py: e2fsck, -f, -p, image
e2fsck: CommandFilter, /sbin/e2fsck, root
# nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
+# nova/virt/disk/api.py: resize2fs, image
resize2fs: CommandFilter, /sbin/resize2fs, root
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 7cd7e1c7d..85b87e3e5 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -511,7 +511,13 @@ class Executor(wsgi.Application):
except exception.KeyPairExists as ex:
LOG.debug(_('KeyPairExists raised: %s'), unicode(ex),
context=context)
- return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
+ code = 'InvalidKeyPair.Duplicate'
+ return ec2_error(req, request_id, code, unicode(ex))
+ except exception.InvalidKeypair as ex:
+ LOG.debug(_('InvalidKeypair raised: %s'), unicode(ex),
+ context)
+ code = 'InvalidKeyPair.Format'
+ return ec2_error(req, request_id, code, unicode(ex))
except exception.InvalidParameterValue as ex:
LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex),
context=context)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 414b2e969..31f486b81 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -434,7 +434,8 @@ class CloudController(object):
#If looking for non existent key pair
if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
- raise exception.EC2APIError(msg)
+ raise exception.KeypairNotFound(msg,
+ code="InvalidKeyPair.Duplicate")
result = []
for key_pair in key_pairs:
@@ -457,13 +458,7 @@ class CloudController(object):
key_name)
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
- raise exception.EC2APIError(msg)
- except exception.InvalidKeypair:
- msg = _("Keypair data is invalid")
- raise exception.EC2APIError(msg)
- except exception.KeyPairExists:
- msg = _("Key pair '%s' already exists.") % key_name
- raise exception.KeyPairExists(msg)
+ raise exception.EC2APIError(msg, code='ResourceLimitExceeded')
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint'],
'keyMaterial': keypair['private_key']}
@@ -486,9 +481,6 @@ class CloudController(object):
except exception.InvalidKeypair:
msg = _("Keypair data is invalid")
raise exception.EC2APIError(msg)
- except exception.KeyPairExists:
- msg = _("Key pair '%s' already exists.") % key_name
- raise exception.EC2APIError(msg)
return {'keyName': key_name,
'keyFingerprint': keypair['fingerprint']}
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index cfe0d7879..bc47b3e0d 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -117,7 +117,8 @@ def get_ip_info_for_instance(context, instance):
def get_availability_zone_by_host(services, host):
if len(services) > 0:
- return availability_zones.get_host_availability_zone(context, host)
+ return availability_zones.get_host_availability_zone(
+ context.get_admin_context(), host)
return 'unknown zone'
@@ -178,7 +179,7 @@ def ec2_vol_id_to_uuid(ec2_id):
def is_ec2_timestamp_expired(request, expires=None):
- """Checks the timestamp or expiry time included in a EC2 request
+ """Checks the timestamp or expiry time included in an EC2 request
and returns true if the request is expired
"""
query_time = None
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index fa7836b37..1c053ea59 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -130,7 +130,7 @@ class AdminActionsController(wsgi.Controller):
@wsgi.action('resetNetwork')
def _reset_network(self, req, id, body):
- """Permit admins to reset networking on an server."""
+ """Permit admins to reset networking on a server."""
context = req.environ['nova.context']
authorize(context, 'resetNetwork')
try:
diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py
index 023a054d0..0de5d536f 100644
--- a/nova/api/openstack/compute/server_metadata.py
+++ b/nova/api/openstack/compute/server_metadata.py
@@ -136,6 +136,10 @@ class Controller(object):
raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error),
headers={'Retry-After': 0})
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'update metadata')
+
@wsgi.serializers(xml=common.MetaItemTemplate)
def show(self, req, server_id, id):
"""Return a single metadata item."""
@@ -162,10 +166,15 @@ class Controller(object):
try:
server = self.compute_api.get(context, server_id)
self.compute_api.delete_instance_metadata(context, server, id)
+
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'delete metadata')
+
def create_resource():
return wsgi.Resource(Controller())
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index f0fdb5a15..f7f186870 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -540,8 +540,9 @@ class Controller(wsgi.Controller):
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as e:
- msg = _("Flavor could not be found")
- raise exc.HTTPUnprocessableEntity(explanation=msg)
+ log_msg = _("Flavor '%s' could not be found ")
+ LOG.debug(log_msg, search_opts['flavor'])
+ instance_list = []
if is_detail:
self._add_instance_faults(context, instance_list)
@@ -561,17 +562,28 @@ class Controller(wsgi.Controller):
req.cache_db_instance(instance)
return instance
- def _validate_server_name(self, value):
+ def _check_string_length(self, value, name, max_length=None):
if not isinstance(value, basestring):
- msg = _("Server name is not a string or unicode")
+ msg = _("%s is not a string or unicode") % name
raise exc.HTTPBadRequest(explanation=msg)
if not value.strip():
- msg = _("Server name is an empty string")
+ msg = _("%s is an empty string") % name
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ if max_length and len(value) > max_length:
+ msg = _("%(name)s can be at most %(max_length)s "
+ "characters.") % locals()
raise exc.HTTPBadRequest(explanation=msg)
- if not len(value) < 256:
- msg = _("Server name must be less than 256 characters.")
+ def _validate_server_name(self, value):
+ self._check_string_length(value, 'Server name', max_length=255)
+
+ def _validate_device_name(self, value):
+ self._check_string_length(value, 'Device name', max_length=255)
+
+ if ' ' in value:
+ msg = _("Device name cannot include spaces.")
raise exc.HTTPBadRequest(explanation=msg)
def _get_injected_files(self, personality):
@@ -809,6 +821,7 @@ class Controller(wsgi.Controller):
if self.ext_mgr.is_loaded('os-volumes'):
block_device_mapping = server_dict.get('block_device_mapping', [])
for bdm in block_device_mapping:
+ self._validate_device_name(bdm["device_name"])
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = utils.bool_from_str(
bdm['delete_on_termination'])
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 519669134..733685b14 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -919,6 +919,10 @@ class Resource(wsgi.Application):
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
+ if body:
+ LOG.info(_("Action: '%(action)s', body: %(body)s") % locals())
+ LOG.debug(_("Calling method %s") % meth)
+
# Now, deserialize the request body...
try:
if content_type:
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
index cb5cce591..62c83f6ed 100644
--- a/nova/availability_zones.py
+++ b/nova/availability_zones.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""utilities for multiple APIs."""
+"""Availability zone helper functions."""
from nova import db
from nova.openstack.common import cfg
@@ -46,7 +46,7 @@ def set_availability_zones(context, services):
az = CONF.internal_service_availability_zone
if service['topic'] == "compute":
if metadata.get(service['host']):
- az = str(metadata[service['host']])[5:-2]
+ az = u','.join(list(metadata[service['host']]))
else:
az = CONF.default_availability_zone
service['availability_zone'] = az
@@ -55,7 +55,7 @@ def set_availability_zones(context, services):
def get_host_availability_zone(context, host):
metadata = db.aggregate_metadata_get_by_host(
- context.get_admin_context(), host, key='availability_zone')
+ context, host, key='availability_zone')
if 'availability_zone' in metadata:
return list(metadata['availability_zone'])[0]
else:
diff --git a/nova/compute/api.py b/nova/compute/api.py
index c7ca0640d..f6090b40c 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -946,11 +946,10 @@ class API(base.Base):
if (old['vm_state'] != vm_states.SOFT_DELETED and
old['task_state'] not in (task_states.DELETING,
task_states.SOFT_DELETING)):
- reservations = QUOTAS.reserve(context,
- project_id=project_id,
- instances=-1,
- cores=-instance['vcpus'],
- ram=-instance['memory_mb'])
+ reservations = self._create_reservations(context,
+ old,
+ updated,
+ project_id)
if not host:
# Just update database, nothing else we can do
@@ -1026,6 +1025,45 @@ class API(base.Base):
reservations,
project_id=project_id)
+ def _create_reservations(self, context, old_instance, new_instance,
+ project_id):
+ instance_vcpus = old_instance['vcpus']
+ instance_memory_mb = old_instance['memory_mb']
+ # NOTE(wangpan): if the instance is resizing, and the resources
+ # are updated to new instance type, we should use
+ # the old instance type to create reservation.
+ # see https://bugs.launchpad.net/nova/+bug/1099729 for more details
+ if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_FINISH):
+ get_migration = self.db.migration_get_by_instance_and_status
+ try:
+ migration_ref = get_migration(context.elevated(),
+ old_instance['uuid'], 'post-migrating')
+ except exception.MigrationNotFoundByStatus:
+ migration_ref = None
+ if (migration_ref and
+ new_instance['instance_type_id'] ==
+ migration_ref['new_instance_type_id']):
+ old_inst_type_id = migration_ref['old_instance_type_id']
+ get_inst_type_by_id = instance_types.get_instance_type
+ try:
+ old_inst_type = get_inst_type_by_id(old_inst_type_id)
+ except exception.InstanceTypeNotFound:
+ LOG.warning(_("instance type %(old_inst_type_id)d "
+ "not found") % locals())
+ pass
+ else:
+ instance_vcpus = old_inst_type['vcpus']
+ instance_memory_mb = old_inst_type['memory_mb']
+ LOG.debug(_("going to delete a resizing instance"))
+
+ reservations = QUOTAS.reserve(context,
+ project_id=project_id,
+ instances=-1,
+ cores=-instance_vcpus,
+ ram=-instance_memory_mb)
+ return reservations
+
def _local_delete(self, context, instance, bdms):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
@@ -1182,8 +1220,10 @@ class API(base.Base):
# NOTE(ameade): we still need to support integer ids for ec2
if uuidutils.is_uuid_like(instance_id):
instance = self.db.instance_get_by_uuid(context, instance_id)
- else:
+ elif utils.is_int_like(instance_id):
instance = self.db.instance_get(context, instance_id)
+ else:
+ raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
@@ -1330,7 +1370,8 @@ class API(base.Base):
return image_meta
@wrap_check_policy
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
+ vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Snapshot the given instance.
@@ -2158,6 +2199,9 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
+ vm_states.SUSPENDED, vm_states.STOPPED],
+ task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
@@ -2169,6 +2213,9 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
+ vm_states.SUSPENDED, vm_states.STOPPED],
+ task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index fa1746b92..d006ea049 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -230,7 +230,7 @@ def wrap_instance_fault(function):
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
- kwargs['instance']['uuid'], e, sys.exc_info())
+ kwargs['instance'], e, sys.exc_info())
return decorated_function
@@ -463,6 +463,11 @@ class ComputeManager(manager.SchedulerDependentManager):
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not support '
'resume guests'), instance=instance)
+ except Exception:
+ # NOTE(vish): The instance failed to resume, so we set the
+ # instance to error and attempt to continue.
+ LOG.warning(_('Failed to resume instance'), instance=instance)
+ self._set_instance_error_state(context, instance['uuid'])
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
@@ -730,7 +735,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_uuid = instance['uuid']
rescheduled = False
- compute_utils.add_instance_fault_from_exc(context, instance_uuid,
+ compute_utils.add_instance_fault_from_exc(context, instance,
exc_info[1], exc_info=exc_info)
try:
@@ -1464,7 +1469,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
- instance['uuid'], exc, sys.exc_info())
+ instance, exc, sys.exc_info())
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
@@ -1995,7 +2000,7 @@ class ComputeManager(manager.SchedulerDependentManager):
rescheduled = False
instance_uuid = instance['uuid']
- compute_utils.add_instance_fault_from_exc(context, instance_uuid,
+ compute_utils.add_instance_fault_from_exc(context, instance,
exc_info[0], exc_info=exc_info)
try:
@@ -2459,8 +2464,11 @@ class ComputeManager(manager.SchedulerDependentManager):
@lockutils.synchronized(instance['uuid'], 'nova-')
def do_reserve():
+ bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
+ context, instance)
result = compute_utils.get_device_name_for_instance(context,
instance,
+ bdms,
device)
# NOTE(vish): create bdm here to avoid race condition
values = {'instance_uuid': instance['uuid'],
@@ -2558,7 +2566,7 @@ class ComputeManager(manager.SchedulerDependentManager):
mp)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
- msg = _("Faild to detach volume %(volume_id)s from %(mp)s")
+ msg = _("Failed to detach volume %(volume_id)s from %(mp)s")
LOG.exception(msg % locals(), context=context,
instance=instance)
volume = self.volume_api.get(context, volume_id)
@@ -2864,9 +2872,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.migrate_instance_finish(context, instance, migration)
network_info = self._get_instance_nw_info(context, instance)
+ block_device_info = self._get_instance_volume_block_device_info(
+ context, instance)
+
self.driver.post_live_migration_at_destination(context, instance,
self._legacy_nw_info(network_info),
- block_migration)
+ block_migration, block_device_info)
# Restore instance state
current_power_state = self._get_power_state(context, instance)
instance = self._instance_update(context, instance['uuid'],
@@ -3374,10 +3385,8 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
- elif vm_power_state in (power_state.PAUSED,
- power_state.SUSPENDED):
- LOG.warn(_("Instance is paused or suspended "
- "unexpectedly. Calling "
+ elif vm_power_state == power_state.SUSPENDED:
+ LOG.warn(_("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
@@ -3385,6 +3394,16 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
+ elif vm_power_state == power_state.PAUSED:
+ # Note(maoy): a VM may get into the paused state not only
+ # because the user request via API calls, but also
+ # due to (temporary) external instrumentations.
+ # Before the virt layer can reliably report the reason,
+ # we simply ignore the state discrepancy. In many cases,
+ # the VM state will go back to running after the external
+ # instrumentation is done. See bug 1097806 for details.
+ LOG.warn(_("Instance is paused unexpectedly. Ignore."),
+ instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 0c475d082..2b1286e16 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -44,7 +44,7 @@ def metadata_to_dict(metadata):
return result
-def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None):
+def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
"""Adds the specified fault to the database."""
code = 500
@@ -62,15 +62,16 @@ def add_instance_fault_from_exc(context, instance_uuid, fault, exc_info=None):
details += '\n' + ''.join(traceback.format_tb(tb))
values = {
- 'instance_uuid': instance_uuid,
+ 'instance_uuid': instance['uuid'],
'code': code,
'message': unicode(message),
'details': unicode(details),
+ 'host': CONF.host
}
db.instance_fault_create(context, values)
-def get_device_name_for_instance(context, instance, device):
+def get_device_name_for_instance(context, instance, bdms, device):
"""Validates (or generates) a device name for instance.
If device is not set, it will generate a unique device appropriate
@@ -87,8 +88,6 @@ def get_device_name_for_instance(context, instance, device):
req_prefix, req_letters = block_device.match_device(device)
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=device)
- bdms = db.block_device_mapping_get_all_by_instance(context,
- instance['uuid'])
mappings = block_device.instance_block_mapping(instance, bdms)
try:
prefix = block_device.match_device(mappings['root'])[0]
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 31ee19601..d05c94877 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -117,6 +117,11 @@ class LocalAPI(object):
return self._manager.instance_get_active_by_window(
context, begin, end, project_id, host)
+ def instance_get_active_by_window_joined(self, context, begin, end=None,
+ project_id=None, host=None):
+ return self._manager.instance_get_active_by_window_joined(
+ context, begin, end, project_id, host)
+
def instance_info_cache_update(self, context, instance, values):
return self._manager.instance_info_cache_update(context,
instance,
@@ -285,6 +290,9 @@ class LocalAPI(object):
return self._manager.compute_node_update(context, node, values,
prune_stats)
+ def service_update(self, context, service, values):
+ return self._manager.service_update(context, service, values)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager."""
@@ -366,6 +374,11 @@ class API(object):
return self.conductor_rpcapi.instance_get_active_by_window(
context, begin, end, project_id, host)
+ def instance_get_active_by_window_joined(self, context, begin, end=None,
+ project_id=None, host=None):
+ return self.conductor_rpcapi.instance_get_active_by_window_joined(
+ context, begin, end, project_id, host)
+
def instance_info_cache_update(self, context, instance, values):
return self.conductor_rpcapi.instance_info_cache_update(context,
instance, values)
@@ -548,3 +561,6 @@ class API(object):
def compute_node_update(self, context, node, values, prune_stats=False):
return self.conductor_rpcapi.compute_node_update(context, node,
values, prune_stats)
+
+ def service_update(self, context, service, values):
+ return self.conductor_rpcapi.service_update(context, service, values)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 9b18d1e00..87b143912 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD."""
- RPC_API_VERSION = '1.33'
+ RPC_API_VERSION = '1.35'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -234,10 +234,14 @@ class ConductorManager(manager.SchedulerDependentManager):
def instance_get_active_by_window(self, context, begin, end=None,
project_id=None, host=None):
- result = self.db.instance_get_active_by_window_joined(context,
- begin, end,
- project_id,
- host)
+ result = self.db.instance_get_active_by_window(context, begin, end,
+ project_id, host)
+ return jsonutils.to_primitive(result)
+
+ def instance_get_active_by_window_joined(self, context, begin, end=None,
+ project_id=None, host=None):
+ result = self.db.instance_get_active_by_window_joined(
+ context, begin, end, project_id, host)
return jsonutils.to_primitive(result)
def instance_destroy(self, context, instance):
@@ -310,3 +314,8 @@ class ConductorManager(manager.SchedulerDependentManager):
result = self.db.compute_node_update(context, node['id'], values,
prune_stats)
return jsonutils.to_primitive(result)
+
+ @rpc_common.client_exceptions(exception.ServiceNotFound)
+ def service_update(self, context, service, values):
+ svc = self.db.service_update(context, service['id'], values)
+ return jsonutils.to_primitive(svc)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 95e332840..1699c85ed 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -66,6 +66,8 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.31 - Added migration_get_in_progress_by_host_and_node
1.32 - Added optional node to instance_get_all_by_host
1.33 - Added compute_node_create and compute_node_update
+ 1.34 - Added service_update
+ 1.35 - Added instance_get_active_by_window_joined
"""
BASE_RPC_API_VERSION = '1.0'
@@ -240,6 +242,13 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
host=host)
return self.call(context, msg, version='1.15')
+ def instance_get_active_by_window_joined(self, context, begin, end=None,
+ project_id=None, host=None):
+ msg = self.make_msg('instance_get_active_by_window_joined',
+ begin=begin, end=end, project_id=project_id,
+ host=host)
+ return self.call(context, msg, version='1.35')
+
def instance_destroy(self, context, instance):
instance_p = jsonutils.to_primitive(instance)
msg = self.make_msg('instance_destroy', instance=instance_p)
@@ -316,3 +325,8 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
msg = self.make_msg('compute_node_update', node=node_p, values=values,
prune_stats=prune_stats)
return self.call(context, msg, version='1.33')
+
+ def service_update(self, context, service, values):
+ service_p = jsonutils.to_primitive(service)
+ msg = self.make_msg('service_update', service=service_p, values=values)
+ return self.call(context, msg, version='1.34')
diff --git a/nova/crypto.py b/nova/crypto.py
index 68d25e650..5c48c60b6 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -135,13 +135,14 @@ def generate_fingerprint(public_key):
raise exception.InvalidKeypair()
-def generate_key_pair(bits=1024):
- # what is the magic 65537?
-
+def generate_key_pair(bits=None):
with utils.tempdir() as tmpdir:
keyfile = os.path.join(tmpdir, 'temp')
- utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
- '-t', 'rsa', '-f', keyfile, '-C', 'Generated by Nova')
+ args = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa',
+ '-f', keyfile, '-C', 'Generated by Nova']
+ if bits is not None:
+ args.extend(['-b', bits])
+ utils.execute(*args)
fingerprint = _generate_fingerprint('%s.pub' % (keyfile))
if not os.path.exists(keyfile):
raise exception.FileNotFound(keyfile)
diff --git a/nova/db/api.py b/nova/db/api.py
index ecfcfab15..d8a16c52d 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -497,6 +497,11 @@ def fixed_ip_get_by_address_detailed(context, address):
return IMPL.fixed_ip_get_by_address_detailed(context, address)
+def fixed_ip_get_by_floating_address(context, floating_address):
+ """Get a fixed ip by a floating address."""
+ return IMPL.fixed_ip_get_by_floating_address(context, floating_address)
+
+
def fixed_ip_get_by_instance(context, instance_uuid):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
@@ -754,12 +759,13 @@ def instance_info_cache_update(context, instance_uuid, values,
:param values: = dict containing column values to update
"""
rv = IMPL.instance_info_cache_update(context, instance_uuid, values)
- try:
- cells_rpcapi.CellsAPI().instance_info_cache_update_at_top(context,
- rv)
- except Exception:
- LOG.exception(_("Failed to notify cells of instance info cache "
- "update"))
+ if update_cells:
+ try:
+ cells_rpcapi.CellsAPI().instance_info_cache_update_at_top(
+ context, rv)
+ except Exception:
+ LOG.exception(_("Failed to notify cells of instance info "
+ "cache update"))
return rv
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index cb3d69f78..5317487cd 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -448,6 +448,7 @@ def service_update(context, service_id, values):
service_ref = service_get(context, service_id, session=session)
service_ref.update(values)
service_ref.save(session=session)
+ return service_ref
###################
@@ -890,15 +891,12 @@ def _floating_ip_get_by_address(context, address, session=None):
@require_context
def floating_ip_get_by_fixed_address(context, fixed_address):
- subq = model_query(context, models.FixedIp.id).\
- filter_by(address=fixed_address).\
- limit(1).\
- subquery()
return model_query(context, models.FloatingIp).\
- filter_by(fixed_ip_id=subq.as_scalar()).\
- all()
-
- # NOTE(tr3buchet) please don't invent an exception here, empty list is fine
+ outerjoin(models.FixedIp,
+ models.FixedIp.id ==
+ models.FloatingIp.fixed_ip_id).\
+ filter(models.FixedIp.address == fixed_address).\
+ all()
@require_context
@@ -1195,6 +1193,17 @@ def fixed_ip_get_by_address_detailed(context, address, session=None):
@require_context
+def fixed_ip_get_by_floating_address(context, floating_address):
+ return model_query(context, models.FixedIp).\
+ outerjoin(models.FloatingIp,
+ models.FloatingIp.fixed_ip_id ==
+ models.FixedIp.id).\
+ filter(models.FloatingIp.address == floating_address).\
+ first()
+    # NOTE(tr3buchet) please don't invent an exception here, None is fine
+
+
+@require_context
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
@@ -3657,7 +3666,7 @@ def instance_type_destroy(context, name):
@require_context
def _instance_type_access_query(context, session=None):
return model_query(context, models.InstanceTypeProjects, session=session,
- read_deleted="yes")
+ read_deleted="no")
@require_admin_context
@@ -3673,6 +3682,8 @@ def instance_type_access_get_by_flavor_id(context, flavor_id):
@require_admin_context
def instance_type_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
+ # NOTE(boris-42): There is a race condition in this method and it will be
+ # rewritten after bp/db-unique-keys implementation.
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
@@ -3680,21 +3691,16 @@ def instance_type_access_add(context, flavor_id, project_id):
instance_type_id = instance_type_ref['id']
access_ref = _instance_type_access_query(context, session=session).\
filter_by(instance_type_id=instance_type_id).\
- filter_by(project_id=project_id).first()
-
- if not access_ref:
- access_ref = models.InstanceTypeProjects()
- access_ref.instance_type_id = instance_type_id
- access_ref.project_id = project_id
- access_ref.save(session=session)
- elif access_ref.deleted:
- access_ref.update({'deleted': False,
- 'deleted_at': None})
- access_ref.save(session=session)
- else:
+ filter_by(project_id=project_id).\
+ first()
+ if access_ref:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
+ access_ref = models.InstanceTypeProjects()
+ access_ref.update({"instance_type_id": instance_type_id,
+ "project_id": project_id})
+ access_ref.save(session=session)
return access_ref
@@ -3710,7 +3716,6 @@ def instance_type_access_remove(context, flavor_id, project_id):
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete()
-
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
@@ -4078,21 +4083,42 @@ def instance_type_extra_specs_get_item(context, flavor_id, key,
@require_context
-def instance_type_extra_specs_update_or_create(context, flavor_id,
- specs):
+def instance_type_extra_specs_update_or_create(context, flavor_id, specs):
+ # NOTE(boris-42): There is a race condition in this method. We should add
+ # UniqueConstraint on (instance_type_id, key, deleted) to
+ # avoid duplicated instance_type_extra_specs. This will be
+ # possible after bp/db-unique-keys implementation.
session = get_session()
- spec_ref = None
- instance_type = instance_type_get_by_flavor_id(context, flavor_id)
- for key, value in specs.iteritems():
- try:
- spec_ref = instance_type_extra_specs_get_item(
- context, flavor_id, key, session)
- except exception.InstanceTypeExtraSpecsNotFound:
+ with session.begin():
+ instance_type_id = model_query(context, models.InstanceTypes.id,
+ session=session, read_deleted="no").\
+ filter(models.InstanceTypes.flavorid == flavor_id).\
+ first()
+ if not instance_type_id:
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
+
+ instance_type_id = instance_type_id.id
+
+ spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
+ session=session, read_deleted="no").\
+ filter_by(instance_type_id=instance_type_id).\
+ filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
+ all()
+
+ existing_keys = set()
+ for spec_ref in spec_refs:
+ key = spec_ref["key"]
+ existing_keys.add(key)
+ spec_ref.update({"value": specs[key]})
+
+ for key, value in specs.iteritems():
+ if key in existing_keys:
+ continue
spec_ref = models.InstanceTypeExtraSpecs()
- spec_ref.update({"key": key, "value": value,
- "instance_type_id": instance_type["id"],
- "deleted": False})
- spec_ref.save(session=session)
+ spec_ref.update({"key": key, "value": value,
+ "instance_type_id": instance_type_id})
+ session.add(spec_ref)
+
return specs
@@ -4422,28 +4448,33 @@ def aggregate_metadata_get_item(context, aggregate_id, key, session=None):
@require_admin_context
@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
+ # NOTE(boris-42): There is a race condition in this method. We should add
+    #                 UniqueConstraint on (aggregate_id, key, deleted) to
+ # avoid duplicated aggregate_metadata. This will be
+ # possible after bp/db-unique-keys implementation.
session = get_session()
all_keys = metadata.keys()
with session.begin():
query = aggregate_metadata_get_query(context, aggregate_id,
+ read_deleted='no',
session=session)
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
query = query.filter(models.AggregateMetadata.key.in_(all_keys))
- already_existing_keys = []
+ already_existing_keys = set()
for meta_ref in query.all():
key = meta_ref.key
- meta_ref.update({"value": metadata[key],
- "deleted": False,
- "deleted_at": None})
- already_existing_keys.append(key)
+ meta_ref.update({"value": metadata[key]})
+ already_existing_keys.add(key)
- for key in set(all_keys) - set(already_existing_keys):
+ for key, value in metadata.iteritems():
+ if key in already_existing_keys:
+ continue
meta_ref = models.AggregateMetadata()
meta_ref.update({"key": key,
- "value": metadata[key],
+ "value": value,
"aggregate_id": aggregate_id})
session.add(meta_ref)
@@ -4477,25 +4508,24 @@ def aggregate_host_delete(context, aggregate_id, host):
@require_admin_context
@require_aggregate_exists
def aggregate_host_add(context, aggregate_id, host):
+ # NOTE(boris-42): There is a race condition in this method and it will be
+ # rewritten after bp/db-unique-keys implementation.
session = get_session()
- host_ref = _aggregate_get_query(context,
- models.AggregateHost,
- models.AggregateHost.aggregate_id,
- aggregate_id,
- session=session,
- read_deleted='yes').\
- filter_by(host=host).first()
- if not host_ref:
+ with session.begin():
+ host_ref = _aggregate_get_query(context,
+ models.AggregateHost,
+ models.AggregateHost.aggregate_id,
+ aggregate_id,
+ session=session,
+ read_deleted='no').\
+ filter_by(host=host).\
+ first()
+ if host_ref:
+ raise exception.AggregateHostExists(host=host,
+ aggregate_id=aggregate_id)
host_ref = models.AggregateHost()
- values = {"host": host, "aggregate_id": aggregate_id, }
- host_ref.update(values)
+ host_ref.update({"host": host, "aggregate_id": aggregate_id})
host_ref.save(session=session)
- elif host_ref.deleted:
- host_ref.update({'deleted': False, 'deleted_at': None})
- host_ref.save(session=session)
- else:
- raise exception.AggregateHostExists(host=host,
- aggregate_id=aggregate_id)
return host_ref
@@ -4699,49 +4729,44 @@ def _ec2_instance_get_query(context, session=None):
@require_admin_context
-def task_log_get(context, task_name, period_beginning,
- period_ending, host, state=None, session=None):
+def _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host=None, state=None, session=None):
query = model_query(context, models.TaskLog, session=session).\
filter_by(task_name=task_name).\
filter_by(period_beginning=period_beginning).\
- filter_by(period_ending=period_ending).\
- filter_by(host=host)
+ filter_by(period_ending=period_ending)
+ if host is not None:
+ query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
+ return query
- return query.first()
+
+@require_admin_context
+def task_log_get(context, task_name, period_beginning, period_ending, host,
+ state=None):
+    return _task_log_get_query(context, task_name, period_beginning,
+                               period_ending, host, state).first()
@require_admin_context
-def task_log_get_all(context, task_name, period_beginning,
- period_ending, host=None, state=None, session=None):
- query = model_query(context, models.TaskLog, session=session).\
- filter_by(task_name=task_name).\
- filter_by(period_beginning=period_beginning).\
- filter_by(period_ending=period_ending)
- if host is not None:
- query = query.filter_by(host=host)
- if state is not None:
- query = query.filter_by(state=state)
- return query.all()
+def task_log_get_all(context, task_name, period_beginning, period_ending,
+ host=None, state=None):
+    return _task_log_get_query(context, task_name, period_beginning,
+                               period_ending, host, state).all()
@require_admin_context
-def task_log_begin_task(context, task_name,
- period_beginning,
- period_ending,
- host,
- task_items=None,
- message=None,
- session=None):
- session = session or get_session()
+def task_log_begin_task(context, task_name, period_beginning, period_ending,
+ host, task_items=None, message=None):
+ # NOTE(boris-42): This method has a race condition and will be rewritten
+ # after bp/db-unique-keys implementation.
+ session = get_session()
with session.begin():
- task = task_log_get(context, task_name,
- period_beginning,
- period_ending,
- host,
- session=session)
- if task:
+ task_ref = _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host, session=session).\
+ first()
+ if task_ref:
#It's already run(ning)!
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
task = models.TaskLog()
@@ -4755,30 +4780,20 @@ def task_log_begin_task(context, task_name,
if task_items:
task.task_items = task_items
task.save(session=session)
- return task
@require_admin_context
-def task_log_end_task(context, task_name,
- period_beginning,
- period_ending,
- host,
- errors,
- message=None,
- session=None):
- session = session or get_session()
+def task_log_end_task(context, task_name, period_beginning, period_ending,
+ host, errors, message=None):
+ values = dict(state="DONE", errors=errors)
+ if message:
+ values["message"] = message
+
+ session = get_session()
with session.begin():
- task = task_log_get(context, task_name,
- period_beginning,
- period_ending,
- host,
- session=session)
- if not task:
+ rows = _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host, session=session).\
+ update(values)
+ if rows == 0:
#It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
- task.state = "DONE"
- if message:
- task.message = message
- task.errors = errors
- task.save(session=session)
- return task
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/150_add_host_to_instance_faults.py b/nova/db/sqlalchemy/migrate_repo/versions/150_add_host_to_instance_faults.py
new file mode 100644
index 000000000..3fd87e1e1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/150_add_host_to_instance_faults.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Index, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instance_faults = Table('instance_faults', meta, autoload=True)
+ host = Column('host', String(length=255))
+ instance_faults.create_column(host)
+ Index('instance_faults_host_idx', instance_faults.c.host).create(
+ migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instance_faults = Table('instance_faults', meta, autoload=True)
+ instance_faults.drop_column('host')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py b/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py
new file mode 100644
index 000000000..44c3aa41f
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py
@@ -0,0 +1,52 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2013 Wenhao Xu <xuwenhao2008@gmail.com>.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, String, Table, DateTime
+from sqlalchemy.dialects import postgresql
+
+
+def upgrade(migrate_engine):
+ """Convert period_beginning and period_ending to DateTime."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+ dialect = migrate_engine.url.get_dialect()
+
+ if dialect is postgresql.dialect:
+        # We need to handle postgresql specially.
+ # Can't use migrate's alter() because it does not support
+ # explicit casting
+ for column in ('period_beginning', 'period_ending'):
+ migrate_engine.execute(
+ "ALTER TABLE task_log "
+ "ALTER COLUMN %s TYPE TIMESTAMP WITHOUT TIME ZONE "
+ "USING %s::TIMESTAMP WITHOUT TIME ZONE"
+ % (column, column))
+ else:
+ migrations = Table('task_log', meta, autoload=True)
+ migrations.c.period_beginning.alter(DateTime)
+ migrations.c.period_ending.alter(DateTime)
+
+
+def downgrade(migrate_engine):
+ """Convert columns back to String(255)."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ # don't need to handle postgresql here.
+ migrations = Table('task_log', meta, autoload=True)
+ migrations.c.period_beginning.alter(String(255))
+ migrations.c.period_ending.alter(String(255))
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 56a4d944a..baa966dbc 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -992,6 +992,7 @@ class InstanceFault(BASE, NovaBase):
code = Column(Integer(), nullable=False)
message = Column(String(255))
details = Column(Text)
+ host = Column(String(255))
class InstanceAction(BASE, NovaBase):
@@ -1037,8 +1038,8 @@ class TaskLog(BASE, NovaBase):
task_name = Column(String(255), nullable=False)
state = Column(String(255), nullable=False)
host = Column(String(255))
- period_beginning = Column(String(255), default=timeutils.utcnow)
- period_ending = Column(String(255), default=timeutils.utcnow)
+ period_beginning = Column(DateTime, default=timeutils.utcnow)
+ period_ending = Column(DateTime, default=timeutils.utcnow)
message = Column(String(255), nullable=False)
task_items = Column(Integer(), default=0)
errors = Column(Integer(), default=0)
diff --git a/nova/exception.py b/nova/exception.py
index 1af92cd08..c15fc1e43 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -530,6 +530,10 @@ class PortNotUsable(NovaException):
message = _("Port %(port_id)s not usable for instance %(instance)s.")
+class PortNotFree(NovaException):
+ message = _("No free port available for instance %(instance)s.")
+
+
class FixedIpNotFound(NotFound):
message = _("No fixed IP associated with id %(id)s.")
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 75551d35c..1a6bba62f 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -22,6 +22,7 @@ from __future__ import absolute_import
import copy
import itertools
import random
+import shutil
import sys
import time
import urlparse
@@ -58,7 +59,12 @@ glance_opts = [
cfg.IntOpt('glance_num_retries',
default=0,
help='Number retries when downloading an image from glance'),
-]
+ cfg.ListOpt('allowed_direct_url_schemes',
+ default=[],
+ help='A list of url scheme that can be downloaded directly '
+ 'via the direct_url. Currently supported schemes: '
+ '[file].'),
+ ]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -254,6 +260,18 @@ class GlanceImageService(object):
def download(self, context, image_id, data):
"""Calls out to Glance for metadata and data and writes data."""
+ if 'file' in CONF.allowed_direct_url_schemes:
+ location = self.get_location(context, image_id)
+ o = urlparse.urlparse(location)
+ if o.scheme == "file":
+ with open(o.path, "r") as f:
+ # FIXME(jbresnah) a system call to cp could have
+ # significant performance advantages, however we
+ # do not have the path to files at this point in
+ # the abstraction.
+ shutil.copyfileobj(f, data)
+ return
+
try:
image_chunks = self._client.call(context, 1, 'data', image_id)
except Exception:
diff --git a/nova/network/api.py b/nova/network/api.py
index 976be93ed..5e3762e89 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -111,7 +111,7 @@ class API(base.Base):
return self.network_rpcapi.get_floating_ip(context, id)
def get_floating_ip_pools(self, context):
- return self.network_rpcapi.get_floating_pools(context)
+ return self.network_rpcapi.get_floating_ip_pools(context)
def get_floating_ip_by_address(self, context, address):
return self.network_rpcapi.get_floating_ip_by_address(context, address)
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index e6abde609..4fefb2db4 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -371,19 +371,32 @@ class IptablesManager(object):
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
+ all_tables, _err = self.execute('%s-save' % (cmd,), '-c',
+ run_as_root=True,
+ attempts=5)
+ all_lines = all_tables.split('\n')
for table in tables:
- current_table, _err = self.execute('%s-save' % (cmd,), '-c',
- '-t', '%s' % (table,),
- run_as_root=True,
- attempts=5)
- current_lines = current_table.split('\n')
- new_filter = self._modify_rules(current_lines,
- tables[table])
- self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
- process_input='\n'.join(new_filter),
- attempts=5)
+ start, end = self._find_table(all_lines, table)
+ all_lines[start:end] = self._modify_rules(
+ all_lines[start:end], tables[table])
+ self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
+ process_input='\n'.join(all_lines),
+ attempts=5)
LOG.debug(_("IPTablesManager.apply completed with success"))
+    def _find_table(self, lines, table_name):
+        """Return (start, end) slice bounds of *table_name*'s section in
+        an ``iptables-save`` dump, or (0, 0) when the table is absent
+        (the fake iptables output used by the unit tests).
+        """
+        if len(lines) < 3:
+            # Output this short only occurs with fake iptables in tests
+            # (the guard is len < 3, i.e. fewer than three lines).
+            return (0, 0)
+        try:
+            # -1 so the slice starts at the line preceding '*<table>'
+            # (the header comment emitted by iptables-save).
+            start = lines.index('*%s' % table_name) - 1
+        except ValueError:
+            # Couldn't find table_name
+            # For Unit Tests
+            return (0, 0)
+        # +2 so the slice includes 'COMMIT' and the line after it.
+        end = lines[start:].index('COMMIT') + start + 2
+        return (start, end)
+
def _modify_rules(self, current_lines, table, binary=None):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
@@ -1150,7 +1163,7 @@ class LinuxNetInterfaceDriver(object):
raise NotImplementedError()
def unplug(self, network):
- """Destory Linux device, return device name."""
+ """Destroy Linux device, return device name."""
raise NotImplementedError()
def get_dev(self, network):
@@ -1390,7 +1403,7 @@ def remove_ebtables_rules(rules):
def isolate_dhcp_address(interface, address):
- # block arp traffic to address accross the interface
+ # block arp traffic to address across the interface
rules = []
rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP'
% (interface, address))
@@ -1406,7 +1419,7 @@ def isolate_dhcp_address(interface, address):
ipv4_filter.add_rule('FORWARD',
'-m physdev --physdev-out %s -d 255.255.255.255 '
'-p udp --dport 67 -j DROP' % interface, top=True)
- # block ip traffic to address accross the interface
+ # block ip traffic to address across the interface
ipv4_filter.add_rule('FORWARD',
'-m physdev --physdev-in %s -d %s -j DROP'
% (interface, address), top=True)
@@ -1416,7 +1429,7 @@ def isolate_dhcp_address(interface, address):
def remove_isolate_dhcp_address(interface, address):
- # block arp traffic to address accross the interface
+ # block arp traffic to address across the interface
rules = []
rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP'
% (interface, address))
@@ -1432,7 +1445,7 @@ def remove_isolate_dhcp_address(interface, address):
ipv4_filter.remove_rule('FORWARD',
'-m physdev --physdev-out %s -d 255.255.255.255 '
'-p udp --dport 67 -j DROP' % interface, top=True)
- # block ip traffic to address accross the interface
+ # block ip traffic to address across the interface
ipv4_filter.remove_rule('FORWARD',
'-m physdev --physdev-in %s -d %s -j DROP'
% (interface, address), top=True)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 7b69c7a36..9ca7680a5 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -147,9 +147,6 @@ network_opts = [
cfg.BoolOpt('auto_assign_floating_ip',
default=False,
help='Autoassigning floating ip to VM'),
- cfg.StrOpt('network_host',
- default=socket.getfqdn(),
- help='Network host to use for ip allocation in flat modes'),
cfg.BoolOpt('fake_network',
default=False,
help='If passed, use fake network devices and addresses'),
@@ -482,7 +479,7 @@ class FloatingIP(object):
@wrap_check_policy
def deallocate_floating_ip(self, context, address,
affect_auto_assigned=False):
- """Returns an floating ip to the pool."""
+ """Returns a floating ip to the pool."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
# handle auto_assigned
@@ -681,7 +678,7 @@ class FloatingIP(object):
# actually remove the ip address on the host. We are
# safe from races on this host due to the decorator,
# but another host might grab the ip right away. We
- # don't worry about this case because the miniscule
+ # don't worry about this case because the minuscule
# window where the ip is on both hosts shouldn't cause
# any problems.
fixed_address = self.db.floating_ip_disassociate(context, address)
@@ -711,6 +708,13 @@ class FloatingIP(object):
@wrap_check_policy
def get_floating_pools(self, context):
"""Returns list of floating pools."""
+ # NOTE(maurosr) This method should be removed in future, replaced by
+ # get_floating_ip_pools. See bug #1091668
+ return self.get_floating_ip_pools(context)
+
+ @wrap_check_policy
+ def get_floating_ip_pools(self, context):
+ """Returns list of floating ip pools."""
pools = self.db.floating_ip_get_pools(context)
return [dict(pool.iteritems()) for pool in pools]
@@ -1926,21 +1930,11 @@ class NetworkManager(manager.SchedulerDependentManager):
def get_instance_id_by_floating_address(self, context, address):
"""Returns the instance id a floating ip's fixed ip is allocated to."""
- floating_ip = self.db.floating_ip_get_by_address(context, address)
- if floating_ip['fixed_ip_id'] is None:
+ fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address)
+ if fixed_ip is None:
return None
-
- fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id'])
-
- # NOTE(tr3buchet): this can be None
- # NOTE(mikal): we need to return the instance id here because its used
- # by ec2 (and possibly others)
- uuid = fixed_ip['instance_uuid']
- if not uuid:
- return uuid
-
- instance = self.db.instance_get_by_uuid(context, uuid)
- return instance['id']
+ else:
+ return fixed_ip['instance_uuid']
@wrap_check_policy
def get_network(self, context, network_uuid):
@@ -2078,6 +2072,13 @@ class FlatManager(NetworkManager):
@wrap_check_policy
def get_floating_pools(self, context):
"""Returns list of floating pools."""
+ # NOTE(maurosr) This method should be removed in future, replaced by
+ # get_floating_ip_pools. See bug #1091668
+ return {}
+
+ @wrap_check_policy
+ def get_floating_ip_pools(self, context):
+ """Returns list of floating ip pools."""
return {}
@wrap_check_policy
diff --git a/nova/network/model.py b/nova/network/model.py
index e4fe0d54c..0771156c1 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -250,7 +250,7 @@ class VIF(Model):
'meta': {...}}]
"""
if self['network']:
- # remove unecessary fields on fixed_ips
+ # remove unnecessary fields on fixed_ips
ips = [IP(**ensure_string_keys(ip)) for ip in self.fixed_ips()]
for ip in ips:
# remove floating ips from IP, since this is a flat structure
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 0deb3a4bb..29e5e2f06 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -111,9 +111,19 @@ class API(base.Base):
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
- NB: QuantumV2 does not yet honour mac address limits.
+ NB: QuantumV2 currently assigns hypervisor supplied MAC addresses
+ to arbitrary networks, which requires openflow switches to
+ function correctly if more than one network is being used with
+ the bare metal hypervisor (which is the only one known to limit
+ MAC addresses).
"""
hypervisor_macs = kwargs.get('macs', None)
+ available_macs = None
+ if hypervisor_macs is not None:
+ # Make a copy we can mutate: records macs that have not been used
+ # to create a port on a network. If we find a mac with a
+ # pre-allocated port we also remove it from this set.
+ available_macs = set(hypervisor_macs)
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
@@ -133,6 +143,12 @@ class API(base.Base):
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
instance=instance['display_name'])
+ else:
+ # Don't try to use this MAC if we need to create a
+ # port on the fly later. Identical MACs may be
+ # configured by users into multiple ports so we
+ # discard rather than popping.
+ available_macs.discard(port['mac_address'])
network_id = port['network_id']
ports[network_id] = port
elif fixed_ip:
@@ -141,7 +157,6 @@ class API(base.Base):
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
-
touched_port_ids = []
created_port_ids = []
for network in nets:
@@ -161,6 +176,12 @@ class API(base.Base):
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
+ if available_macs is not None:
+ if not available_macs:
+ raise exception.PortNotFree(
+ instance=instance['display_name'])
+ mac_address = available_macs.pop()
+ port_req_body['port']['mac_address'] = mac_address
created_port_ids.append(
quantum.create_port(port_req_body)['port']['id'])
except Exception:
@@ -217,11 +238,62 @@ class API(base.Base):
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Add a fixed ip to the instance from specified network."""
- raise NotImplementedError()
+ search_opts = {'network_id': network_id}
+ data = quantumv2.get_client(context).list_subnets(**search_opts)
+ ipam_subnets = data.get('subnets', [])
+ if not ipam_subnets:
+ raise exception.NetworkNotFoundForInstance(
+ instance_id=instance['uuid'])
+
+ zone = 'compute:%s' % instance['availability_zone']
+ search_opts = {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'network_id': network_id}
+ data = quantumv2.get_client(context).list_ports(**search_opts)
+ ports = data['ports']
+ for p in ports:
+ fixed_ips = p['fixed_ips']
+ for subnet in ipam_subnets:
+ fixed_ip = {'subnet_id': subnet['id']}
+ fixed_ips.append(fixed_ip)
+ port_req_body = {'port': {'fixed_ips': fixed_ips}}
+ try:
+ quantumv2.get_client(context).update_port(p['id'],
+ port_req_body)
+ except Exception as ex:
+ msg = _("Unable to update port %(portid)s with"
+ " failure: %(exception)s")
+ LOG.debug(msg, {'portid': p['id'], 'exception': ex})
+ return
+ raise exception.NetworkNotFoundForInstance(
+ instance_id=instance['uuid'])
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed ip from the instance."""
- raise NotImplementedError()
+ zone = 'compute:%s' % instance['availability_zone']
+ search_opts = {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'fixed_ips': 'ip_address=%s' % address}
+ data = quantumv2.get_client(context).list_ports(**search_opts)
+ ports = data['ports']
+ for p in ports:
+ fixed_ips = p['fixed_ips']
+ new_fixed_ips = []
+ for fixed_ip in fixed_ips:
+ if fixed_ip['ip_address'] != address:
+ new_fixed_ips.append(fixed_ip)
+ port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
+ try:
+ quantumv2.get_client(context).update_port(p['id'],
+ port_req_body)
+ except Exception as ex:
+ msg = _("Unable to update port %(portid)s with"
+ " failure: %(exception)s")
+ LOG.debug(msg, {'portid': p['id'], 'exception': ex})
+ return
+
+ raise exception.FixedIpNotFoundForSpecificInstance(
+ instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks):
"""Validate that the tenant can use the requested networks."""
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index 2f52add57..a7bffe17a 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -45,6 +45,7 @@ class NetworkAPI(rpc_proxy.RpcProxy):
1.4 - Add get_backdoor_port()
1.5 - Adds associate
1.6 - Adds instance_uuid to _{dis,}associate_floating_ip
+ 1.7 - Adds method get_floating_ip_pools to replace get_floating_pools
'''
#
@@ -94,8 +95,9 @@ class NetworkAPI(rpc_proxy.RpcProxy):
def get_floating_ip(self, ctxt, id):
return self.call(ctxt, self.make_msg('get_floating_ip', id=id))
- def get_floating_pools(self, ctxt):
- return self.call(ctxt, self.make_msg('get_floating_pools'))
+ def get_floating_ip_pools(self, ctxt):
+ return self.call(ctxt, self.make_msg('get_floating_ip_pools'),
+ version="1.7")
def get_floating_ip_by_address(self, ctxt, address):
return self.call(ctxt, self.make_msg('get_floating_ip_by_address',
diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py
index ad1f2a8a6..534a610c0 100644
--- a/nova/openstack/common/cfg.py
+++ b/nova/openstack/common/cfg.py
@@ -217,7 +217,7 @@ log files::
...
]
-This module also contains a global instance of the CommonConfigOpts class
+This module also contains a global instance of the ConfigOpts class
in order to support a common usage pattern in OpenStack::
from nova.openstack.common import cfg
@@ -236,10 +236,11 @@ in order to support a common usage pattern in OpenStack::
Positional command line arguments are supported via a 'positional' Opt
constructor argument::
- >>> CONF.register_cli_opt(MultiStrOpt('bar', positional=True))
+ >>> conf = ConfigOpts()
+ >>> conf.register_cli_opt(MultiStrOpt('bar', positional=True))
True
- >>> CONF(['a', 'b'])
- >>> CONF.bar
+ >>> conf(['a', 'b'])
+ >>> conf.bar
['a', 'b']
It is also possible to use argparse "sub-parsers" to parse additional
@@ -249,10 +250,11 @@ command line arguments using the SubCommandOpt class:
... list_action = subparsers.add_parser('list')
... list_action.add_argument('id')
...
- >>> CONF.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
+ >>> conf = ConfigOpts()
+ >>> conf.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
True
- >>> CONF(['list', '10'])
- >>> CONF.action.name, CONF.action.id
+ >>> conf(args=['list', '10'])
+ >>> conf.action.name, conf.action.id
('list', '10')
"""
@@ -1726,62 +1728,4 @@ class ConfigOpts(collections.Mapping):
return value
-class CommonConfigOpts(ConfigOpts):
-
- DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
- DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-
- common_cli_opts = [
- BoolOpt('debug',
- short='d',
- default=False,
- help='Print debugging output'),
- BoolOpt('verbose',
- short='v',
- default=False,
- help='Print more verbose output'),
- ]
-
- logging_cli_opts = [
- StrOpt('log-config',
- metavar='PATH',
- help='If this option is specified, the logging configuration '
- 'file specified is used and overrides any other logging '
- 'options specified. Please see the Python logging module '
- 'documentation for details on logging configuration '
- 'files.'),
- StrOpt('log-format',
- default=DEFAULT_LOG_FORMAT,
- metavar='FORMAT',
- help='A logging.Formatter log message format string which may '
- 'use any of the available logging.LogRecord attributes. '
- 'Default: %(default)s'),
- StrOpt('log-date-format',
- default=DEFAULT_LOG_DATE_FORMAT,
- metavar='DATE_FORMAT',
- help='Format string for %%(asctime)s in log records. '
- 'Default: %(default)s'),
- StrOpt('log-file',
- metavar='PATH',
- deprecated_name='logfile',
- help='(Optional) Name of log file to output to. '
- 'If not set, logging will go to stdout.'),
- StrOpt('log-dir',
- deprecated_name='logdir',
- help='(Optional) The directory to keep log files in '
- '(will be prepended to --log-file)'),
- BoolOpt('use-syslog',
- default=False,
- help='Use syslog for logging.'),
- StrOpt('syslog-log-facility',
- default='LOG_USER',
- help='syslog facility to receive log lines')
- ]
-
- def __init__(self):
- super(CommonConfigOpts, self).__init__()
- self.register_cli_opts(self.common_cli_opts)
- self.register_cli_opts(self.logging_cli_opts)
-
-
-CONF = CommonConfigOpts()
+CONF = ConfigOpts()
diff --git a/nova/openstack/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py
index f18e84f6d..118385427 100644
--- a/nova/openstack/common/eventlet_backdoor.py
+++ b/nova/openstack/common/eventlet_backdoor.py
@@ -46,7 +46,7 @@ def _find_objects(t):
def _print_greenthreads():
- for i, gt in enumerate(find_objects(greenlet.greenlet)):
+ for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print i, gt
traceback.print_stack(gt.gr_frame)
print
diff --git a/nova/openstack/common/iniparser.py b/nova/openstack/common/iniparser.py
index 241284449..9bf399f0c 100644
--- a/nova/openstack/common/iniparser.py
+++ b/nova/openstack/common/iniparser.py
@@ -54,7 +54,7 @@ class BaseParser(object):
value = value.strip()
if ((value and value[0] == value[-1]) and
- (value[0] == "\"" or value[0] == "'")):
+ (value[0] == "\"" or value[0] == "'")):
value = value[1:-1]
return key.strip(), [value]
diff --git a/nova/openstack/common/lockutils.py b/nova/openstack/common/lockutils.py
index ba390dc69..6f80a1f67 100644
--- a/nova/openstack/common/lockutils.py
+++ b/nova/openstack/common/lockutils.py
@@ -28,6 +28,7 @@ from eventlet import semaphore
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
+from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
@@ -219,6 +220,11 @@ def synchronized(name, lock_file_prefix, external=False, lock_path=None):
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
+ LOG.debug(_('Released file lock "%(lock)s" at %(path)s'
+ ' for method "%(method)s"...'),
+ {'lock': name,
+ 'path': lock_file_path,
+ 'method': f.__name__})
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to cleanup
# the locks left behind by unit tests.
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index 6e25bb597..32513bb32 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -47,21 +47,82 @@ from nova.openstack.common import local
from nova.openstack.common import notifier
+_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
+_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+common_cli_opts = [
+ cfg.BoolOpt('debug',
+ short='d',
+ default=False,
+ help='Print debugging output (set logging level to '
+ 'DEBUG instead of default WARNING level).'),
+ cfg.BoolOpt('verbose',
+ short='v',
+ default=False,
+ help='Print more verbose output (set logging level to '
+ 'INFO instead of default WARNING level).'),
+]
+
+logging_cli_opts = [
+ cfg.StrOpt('log-config',
+ metavar='PATH',
+ help='If this option is specified, the logging configuration '
+ 'file specified is used and overrides any other logging '
+ 'options specified. Please see the Python logging module '
+ 'documentation for details on logging configuration '
+ 'files.'),
+ cfg.StrOpt('log-format',
+ default=_DEFAULT_LOG_FORMAT,
+ metavar='FORMAT',
+ help='A logging.Formatter log message format string which may '
+ 'use any of the available logging.LogRecord attributes. '
+ 'Default: %(default)s'),
+ cfg.StrOpt('log-date-format',
+ default=_DEFAULT_LOG_DATE_FORMAT,
+ metavar='DATE_FORMAT',
+ help='Format string for %%(asctime)s in log records. '
+ 'Default: %(default)s'),
+ cfg.StrOpt('log-file',
+ metavar='PATH',
+ deprecated_name='logfile',
+ help='(Optional) Name of log file to output to. '
+ 'If not set, logging will go to stdout.'),
+ cfg.StrOpt('log-dir',
+ deprecated_name='logdir',
+ help='(Optional) The directory to keep log files in '
+ '(will be prepended to --log-file)'),
+ cfg.BoolOpt('use-syslog',
+ default=False,
+ help='Use syslog for logging.'),
+ cfg.StrOpt('syslog-log-facility',
+ default='LOG_USER',
+ help='syslog facility to receive log lines')
+]
+
+generic_log_opts = [
+ cfg.BoolOpt('use_stderr',
+ default=True,
+ help='Log output to standard error'),
+ cfg.StrOpt('logfile_mode',
+ default='0644',
+ help='Default file mode used when creating log files'),
+]
+
log_opts = [
cfg.StrOpt('logging_context_format_string',
- default='%(asctime)s.%(msecs)d %(levelname)s %(name)s '
+ default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s '
'[%(request_id)s %(user)s %(tenant)s] %(instance)s'
'%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
- default='%(asctime)s.%(msecs)d %(process)d %(levelname)s '
+ default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
- default='%(asctime)s.%(msecs)d %(process)d TRACE %(name)s '
+ default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
@@ -94,24 +155,9 @@ log_opts = [
'format it like this'),
]
-
-generic_log_opts = [
- cfg.StrOpt('logdir',
- default=None,
- help='Log output to a per-service log file in named directory'),
- cfg.StrOpt('logfile',
- default=None,
- help='Log output to a named file'),
- cfg.BoolOpt('use_stderr',
- default=True,
- help='Log output to standard error'),
- cfg.StrOpt('logfile_mode',
- default='0644',
- help='Default file mode used when creating log files'),
-]
-
-
CONF = cfg.CONF
+CONF.register_cli_opts(common_cli_opts)
+CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
@@ -149,8 +195,8 @@ def _get_binary_name():
def _get_log_file_path(binary=None):
- logfile = CONF.log_file or CONF.logfile
- logdir = CONF.log_dir or CONF.logdir
+ logfile = CONF.log_file
+ logdir = CONF.log_dir
if logfile and not logdir:
return logfile
@@ -259,7 +305,7 @@ class JSONFormatter(logging.Formatter):
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if ('nova.openstack.common.notifier.log_notifier' in
- CONF.notification_driver):
+ CONF.notification_driver):
return
notifier.api.notify(None, 'error.publisher',
'error_notification',
@@ -361,10 +407,12 @@ def _setup_logging_from_conf(product_name):
datefmt=datefmt))
handler.setFormatter(LegacyFormatter(datefmt=datefmt))
- if CONF.verbose or CONF.debug:
+ if CONF.debug:
log_root.setLevel(logging.DEBUG)
- else:
+ elif CONF.verbose:
log_root.setLevel(logging.INFO)
+ else:
+ log_root.setLevel(logging.WARNING)
level = logging.NOTSET
for pair in CONF.default_log_levels:
@@ -425,7 +473,7 @@ class LegacyFormatter(logging.Formatter):
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
- CONF.logging_debug_format_suffix):
+ CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
# Cache this on the record, Logger will respect our formated copy
diff --git a/nova/rootwrap/__init__.py b/nova/openstack/common/rootwrap/__init__.py
index 671d3c173..671d3c173 100644
--- a/nova/rootwrap/__init__.py
+++ b/nova/openstack/common/rootwrap/__init__.py
diff --git a/nova/rootwrap/filters.py b/nova/openstack/common/rootwrap/filters.py
index 8958f1ba1..905bbabea 100644
--- a/nova/rootwrap/filters.py
+++ b/nova/openstack/common/rootwrap/filters.py
@@ -20,7 +20,7 @@ import re
class CommandFilter(object):
- """Command filter only checking that the 1st argument matches exec_path."""
+ """Command filter only checking that the 1st argument matches exec_path"""
def __init__(self, exec_path, run_as, *args):
self.name = ''
@@ -30,7 +30,7 @@ class CommandFilter(object):
self.real_exec = None
def get_exec(self, exec_dirs=[]):
- """Returns existing executable, or empty string if none found."""
+ """Returns existing executable, or empty string if none found"""
if self.real_exec is not None:
return self.real_exec
self.real_exec = ""
@@ -46,7 +46,7 @@ class CommandFilter(object):
return self.real_exec
def match(self, userargs):
- """Only check that the first argument (command) matches exec_path."""
+ """Only check that the first argument (command) matches exec_path"""
if (os.path.basename(self.exec_path) == userargs[0]):
return True
return False
@@ -60,12 +60,12 @@ class CommandFilter(object):
return [to_exec] + userargs[1:]
def get_environment(self, userargs):
- """Returns specific environment to set, None if none."""
+ """Returns specific environment to set, None if none"""
return None
class RegExpFilter(CommandFilter):
- """Command filter doing regexp matching for every argument."""
+ """Command filter doing regexp matching for every argument"""
def match(self, userargs):
# Early skip if command or number of args don't match
@@ -89,15 +89,15 @@ class RegExpFilter(CommandFilter):
class DnsmasqFilter(CommandFilter):
- """Specific filter for the dnsmasq call (which includes env)."""
+ """Specific filter for the dnsmasq call (which includes env)"""
CONFIG_FILE_ARG = 'CONFIG_FILE'
def match(self, userargs):
if (userargs[0] == 'env' and
- userargs[1].startswith(self.CONFIG_FILE_ARG) and
- userargs[2].startswith('NETWORK_ID=') and
- userargs[3] == 'dnsmasq'):
+ userargs[1].startswith(self.CONFIG_FILE_ARG) and
+ userargs[2].startswith('NETWORK_ID=') and
+ userargs[3] == 'dnsmasq'):
return True
return False
@@ -114,7 +114,7 @@ class DnsmasqFilter(CommandFilter):
class DeprecatedDnsmasqFilter(DnsmasqFilter):
- """Variant of dnsmasq filter to support old-style FLAGFILE."""
+ """Variant of dnsmasq filter to support old-style FLAGFILE"""
CONFIG_FILE_ARG = 'FLAGFILE'
@@ -164,7 +164,7 @@ class KillFilter(CommandFilter):
class ReadFileFilter(CommandFilter):
- """Specific filter for the utils.read_file_as_root call."""
+ """Specific filter for the utils.read_file_as_root call"""
def __init__(self, file_path, *args):
self.file_path = file_path
diff --git a/nova/rootwrap/wrapper.py b/nova/openstack/common/rootwrap/wrapper.py
index 70bd63c47..4452177fe 100644
--- a/nova/rootwrap/wrapper.py
+++ b/nova/openstack/common/rootwrap/wrapper.py
@@ -22,7 +22,7 @@ import logging.handlers
import os
import string
-from nova.rootwrap import filters
+from nova.openstack.common.rootwrap import filters
class NoFilterMatched(Exception):
@@ -93,7 +93,7 @@ def setup_syslog(execname, facility, level):
def build_filter(class_name, *args):
- """Returns a filter object of class class_name."""
+ """Returns a filter object of class class_name"""
if not hasattr(filters, class_name):
logging.warning("Skipping unknown filter class (%s) specified "
"in filter definitions" % class_name)
@@ -103,7 +103,7 @@ def build_filter(class_name, *args):
def load_filters(filters_path):
- """Load filters from a list of directories."""
+ """Load filters from a list of directories"""
filterlist = []
for filterdir in filters_path:
if not os.path.isdir(filterdir):
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index bf38201f5..305dc7877 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -175,7 +175,7 @@ class ConsumerBase(object):
try:
self.queue.cancel(self.tag)
except KeyError, e:
- # NOTE(comstud): Kludge to get around a amqplib bug
+ # NOTE(comstud): Kludge to get around an amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index d1ae1cd6e..09de10388 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -56,8 +56,6 @@ CONF.register_opts(scheduler_driver_opts)
def handle_schedule_error(context, ex, instance_uuid, request_spec):
if not isinstance(ex, exception.NoValidHost):
LOG.exception(_("Exception during scheduler.run_instance"))
- compute_utils.add_instance_fault_from_exc(context,
- instance_uuid, ex, sys.exc_info())
state = vm_states.ERROR.upper()
LOG.warning(_('Setting instance to %(state)s state.'),
locals(), instance_uuid=instance_uuid)
@@ -68,6 +66,8 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
'task_state': None})
notifications.send_update(context, old_ref, new_ref,
service="scheduler")
+ compute_utils.add_instance_fault_from_exc(context,
+ new_ref, ex, sys.exc_info())
properties = request_spec.get('instance_properties', {})
payload = dict(request_spec=request_spec,
diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py
index 4d0f2305f..302d2b3a8 100644
--- a/nova/scheduler/filters/trusted_filter.py
+++ b/nova/scheduler/filters/trusted_filter.py
@@ -48,9 +48,12 @@ import httplib
import socket
import ssl
+from nova import context
+from nova import db
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
from nova.scheduler import filters
@@ -78,6 +81,9 @@ trusted_opts = [
deprecated_name='auth_blob',
default=None,
help='attestation authorization blob - must change'),
+ cfg.IntOpt('attestation_auth_timeout',
+ default=60,
+ help='Attestation status cache valid period length'),
]
CONF = cfg.CONF
@@ -119,7 +125,7 @@ class HTTPSClientAuthConnection(httplib.HTTPSConnection):
cert_reqs=ssl.CERT_REQUIRED)
-class AttestationService(httplib.HTTPSConnection):
+class AttestationService(object):
# Provide access wrapper to attestation server to get integrity report.
def __init__(self):
@@ -156,10 +162,10 @@ class AttestationService(httplib.HTTPSConnection):
except (socket.error, IOError) as e:
return IOError, None
- def _request(self, cmd, subcmd, host):
+ def _request(self, cmd, subcmd, hosts):
body = {}
- body['count'] = 1
- body['hosts'] = host
+ body['count'] = len(hosts)
+ body['hosts'] = hosts
cooked = jsonutils.dumps(body)
headers = {}
headers['content-type'] = 'application/json'
@@ -173,39 +179,124 @@ class AttestationService(httplib.HTTPSConnection):
else:
return status, None
- def _check_trust(self, data, host):
- for item in data:
- for state in item['hosts']:
- if state['host_name'] == host:
- return state['trust_lvl']
- return ""
+ def do_attestation(self, hosts):
+ """Attests compute nodes through OAT service.
- def do_attestation(self, host):
- state = []
- status, data = self._request("POST", "PollHosts", host)
- if status != httplib.OK:
- return {}
- state.append(data)
- return self._check_trust(state, host)
+ :param hosts: hosts list to be attested
+ :returns: dictionary for trust level and validate time
+ """
+ result = None
+ status, data = self._request("POST", "PollHosts", hosts)
+ if data != None:
+ result = data.get('hosts')
-class TrustedFilter(filters.BaseHostFilter):
- """Trusted filter to support Trusted Compute Pools."""
+ return result
+
+
+class ComputeAttestationCache(object):
+ """Cache for compute node attestation
+
+ Cache compute node's trust level for sometime,
+ if the cache is out of date, poll OAT service to flush the
+ cache.
+
+ OAT service may have cache also. OAT service's cache valid time
+ should be set shorter than trusted filter's cache valid time.
+ """
def __init__(self):
- self.attestation_service = AttestationService()
+ self.attestservice = AttestationService()
+ self.compute_nodes = {}
+ admin = context.get_admin_context()
+
+ # Fetch compute node list to initialize the compute_nodes,
+ # so that we don't need poll OAT service one by one for each
+ # host in the first round that scheduler invokes us.
+ computes = db.compute_node_get_all(admin)
+ for compute in computes:
+ service = compute['service']
+ if not service:
+ LOG.warn(_("No service for compute ID %s") % compute['id'])
+ continue
+ host = service['host']
+ self._init_cache_entry(host)
+
+ def _cache_valid(self, host):
+ cachevalid = False
+ if host in self.compute_nodes:
+ node_stats = self.compute_nodes.get(host)
+ if not timeutils.is_older_than(
+ node_stats['vtime'],
+ CONF.trusted_computing.attestation_auth_timeout):
+ cachevalid = True
+ return cachevalid
+
+ def _init_cache_entry(self, host):
+ self.compute_nodes[host] = {
+ 'trust_lvl': 'unknown',
+ 'vtime': timeutils.normalize_time(
+ timeutils.parse_isotime("1970-01-01T00:00:00Z"))}
+
+ def _invalidate_caches(self):
+ for host in self.compute_nodes:
+ self._init_cache_entry(host)
+
+ def _update_cache_entry(self, state):
+ entry = {}
+
+ host = state['host_name']
+ entry['trust_lvl'] = state['trust_lvl']
- def _is_trusted(self, host, trust):
- level = self.attestation_service.do_attestation(host)
- LOG.debug(_("TCP: trust state of "
- "%(host)s:%(level)s(%(trust)s)") % locals())
+ try:
+ # Normalize as naive object to interoperate with utcnow().
+ entry['vtime'] = timeutils.normalize_time(
+ timeutils.parse_isotime(state['vtime']))
+ except ValueError:
+ # Mark the system as un-trusted if get invalid vtime.
+ entry['trust_lvl'] = 'unknown'
+ entry['vtime'] = timeutils.utcnow()
+
+ self.compute_nodes[host] = entry
+
+ def _update_cache(self):
+ self._invalidate_caches()
+ states = self.attestservice.do_attestation(self.compute_nodes.keys())
+ if states is None:
+ return
+ for state in states:
+ self._update_cache_entry(state)
+
+ def get_host_attestation(self, host):
+ """Check host's trust level."""
+ if not host in self.compute_nodes:
+ self._init_cache_entry(host)
+ if not self._cache_valid(host):
+ self._update_cache()
+ level = self.compute_nodes.get(host).get('trust_lvl')
+ return level
+
+
+class ComputeAttestation(object):
+ def __init__(self):
+ self.caches = ComputeAttestationCache()
+
+ def is_trusted(self, host, trust):
+ level = self.caches.get_host_attestation(host)
return trust == level
+
+class TrustedFilter(filters.BaseHostFilter):
+ """Trusted filter to support Trusted Compute Pools."""
+
+ def __init__(self):
+ self.compute_attestation = ComputeAttestation()
+
def host_passes(self, host_state, filter_properties):
instance = filter_properties.get('instance_type', {})
extra = instance.get('extra_specs', {})
trust = extra.get('trust:trusted_host')
host = host_state.host
if trust:
- return self._is_trusted(host, trust)
+ return self.compute_attestation.is_trusted(host, trust)
return True
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 84bdcddb5..23e64cd7c 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -180,8 +180,6 @@ class SchedulerManager(manager.Manager):
uuids = [properties.get('uuid')]
for instance_uuid in request_spec.get('instance_uuids') or uuids:
if instance_uuid:
- compute_utils.add_instance_fault_from_exc(context,
- instance_uuid, ex, sys.exc_info())
state = vm_state.upper()
LOG.warning(_('Setting instance to %(state)s state.'),
locals(), instance_uuid=instance_uuid)
@@ -191,6 +189,8 @@ class SchedulerManager(manager.Manager):
context, instance_uuid, updates)
notifications.send_update(context, old_ref, new_ref,
service="scheduler")
+ compute_utils.add_instance_fault_from_exc(context,
+ new_ref, ex, sys.exc_info())
payload = dict(request_spec=request_spec,
instance_properties=properties,
diff --git a/nova/service.py b/nova/service.py
index 0fde14baa..df8cf020f 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -411,7 +411,7 @@ class Service(object):
self.db_allowed = db_allowed
self.conductor_api = conductor.API(use_local=db_allowed)
self.conductor_api.wait_until_ready(context.get_admin_context())
- self.servicegroup_api = servicegroup.API()
+ self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
def start(self):
verstr = version.version_string_with_package()
@@ -421,12 +421,11 @@ class Service(object):
self.model_disconnected = False
ctxt = context.get_admin_context()
try:
- service_ref = self.conductor_api.service_get_by_args(ctxt,
- self.host,
- self.binary)
- self.service_id = service_ref['id']
+ self.service_ref = self.conductor_api.service_get_by_args(ctxt,
+ self.host, self.binary)
+ self.service_id = self.service_ref['id']
except exception.NotFound:
- self._create_service_ref(ctxt)
+ self.service_ref = self._create_service_ref(ctxt)
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
@@ -479,6 +478,7 @@ class Service(object):
}
service = self.conductor_api.service_create(context, svc_values)
self.service_id = service['id']
+ return service
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index ebd0ee6ac..358b7dcbc 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -45,6 +45,15 @@ class API(object):
@lockutils.synchronized('nova.servicegroup.api.new', 'nova-')
def __new__(cls, *args, **kwargs):
+ '''Create an instance of the servicegroup API.
+
+ args and kwargs are passed down to the servicegroup driver when it gets
+ created. No args currently exist, though. Valid kwargs are:
+
+ db_allowed - Boolean. False if direct db access is not allowed and
+ alternative data access (conductor) should be used
+ instead.
+ '''
if not cls._driver:
LOG.debug(_('ServiceGroup driver defined as an instance of %s'),
@@ -55,7 +64,8 @@ class API(object):
except KeyError:
raise TypeError(_("unknown ServiceGroup driver name: %s")
% driver_name)
- cls._driver = importutils.import_object(driver_class)
+ cls._driver = importutils.import_object(driver_class,
+ *args, **kwargs)
utils.check_isinstance(cls._driver, ServiceGroupDriver)
# we don't have to check that cls._driver is not NONE,
# check_isinstance does it
diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py
index 075db3ed8..686ee728b 100644
--- a/nova/servicegroup/drivers/db.py
+++ b/nova/servicegroup/drivers/db.py
@@ -14,8 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from nova import conductor
from nova import context
-from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -32,6 +32,10 @@ LOG = logging.getLogger(__name__)
class DbDriver(api.ServiceGroupDriver):
+ def __init__(self, *args, **kwargs):
+ self.db_allowed = kwargs.get('db_allowed', True)
+ self.conductor_api = conductor.API(use_local=self.db_allowed)
+
def join(self, member_id, group_id, service=None):
"""Join the given service with it's group."""
@@ -53,6 +57,11 @@ class DbDriver(api.ServiceGroupDriver):
Check whether a service is up based on last heartbeat.
"""
last_heartbeat = service_ref['updated_at'] or service_ref['created_at']
+ if isinstance(last_heartbeat, basestring):
+ # NOTE(russellb) If this service_ref came in over rpc via
+ # conductor, then the timestamp will be a string and needs to be
+ # converted back to a datetime.
+ last_heartbeat = timeutils.parse_strtime(last_heartbeat)
# Timestamps in DB are UTC.
elapsed = utils.total_seconds(timeutils.utcnow() - last_heartbeat)
LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s',
@@ -66,7 +75,8 @@ class DbDriver(api.ServiceGroupDriver):
LOG.debug(_('DB_Driver: get_all members of the %s group') % group_id)
rs = []
ctxt = context.get_admin_context()
- for service in db.service_get_all_by_topic(ctxt, group_id):
+ services = self.conductor_api.service_get_all_by_topic(ctxt, group_id)
+ for service in services:
if self.is_up(service):
rs.append(service['host'])
return rs
@@ -76,18 +86,11 @@ class DbDriver(api.ServiceGroupDriver):
ctxt = context.get_admin_context()
state_catalog = {}
try:
- try:
- service_ref = db.service_get(ctxt, service.service_id)
- except exception.NotFound:
- LOG.debug(_('The service database object disappeared, '
- 'Recreating it.'))
- service._create_service_ref(ctxt)
- service_ref = db.service_get(ctxt, service.service_id)
-
- state_catalog['report_count'] = service_ref['report_count'] + 1
+ report_count = service.service_ref['report_count'] + 1
+ state_catalog['report_count'] = report_count
- db.service_update(ctxt,
- service.service_id, state_catalog)
+ service.service_ref = self.conductor_api.service_update(ctxt,
+ service.service_ref, state_catalog)
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index d403ba1f0..5e5723a08 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -18,9 +18,10 @@
# under the License.
import copy
-import tempfile
import uuid
+import fixtures
+
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import api as compute_api
@@ -86,7 +87,7 @@ def get_instances_with_cached_ips(orig_func, *args, **kwargs):
class CinderCloudTestCase(test.TestCase):
def setUp(self):
super(CinderCloudTestCase, self).setUp()
- vol_tmpdir = tempfile.mkdtemp()
+ vol_tmpdir = self.useFixture(fixtures.TempDir()).path
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API')
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 562473121..a00dceff1 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -1440,7 +1440,7 @@ class CloudTestCase(test.TestCase):
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
def test_describe_bad_key_pairs(self):
- self.assertRaises(exception.EC2APIError,
+ self.assertRaises(exception.KeypairNotFound,
self.cloud.describe_key_pairs, self.context,
key_name=['DoesNotExist'])
@@ -1490,7 +1490,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(result['keyName'], key_name)
for key_name in bad_names:
- self.assertRaises(exception.EC2APIError,
+ self.assertRaises(exception.InvalidKeypair,
self.cloud.create_key_pair,
self.context,
key_name)
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
index b8f4e6398..4e577e1f5 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
@@ -54,10 +54,10 @@ class CellsAdminAPITestCase(test.TestCase):
def fake_cast_to_cells(context, instance, method, *args, **kwargs):
"""
- Makes sure that the cells recieve the cast to update
+ Makes sure that the cells receive the cast to update
the cell state
"""
- self.cells_recieved_kwargs.update(kwargs)
+ self.cells_received_kwargs.update(kwargs)
self.admin_api = admin_actions.AdminActionsController()
self.admin_api.compute_api = compute_cells_api.ComputeCellsAPI()
@@ -76,14 +76,14 @@ class CellsAdminAPITestCase(test.TestCase):
self.uuid = uuidutils.generate_uuid()
url = '/fake/servers/%s/action' % self.uuid
self.request = fakes.HTTPRequest.blank(url)
- self.cells_recieved_kwargs = {}
+ self.cells_received_kwargs = {}
def test_reset_active(self):
body = {"os-resetState": {"state": "error"}}
result = self.admin_api._reset_state(self.request, 'inst_id', body)
self.assertEqual(result.status_int, 202)
- # Make sure the cells recieved the update
- self.assertEqual(self.cells_recieved_kwargs,
+ # Make sure the cells received the update
+ self.assertEqual(self.cells_received_kwargs,
dict(vm_state=vm_states.ERROR,
task_state=None))
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
index a72430fd9..efc9b36cc 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
@@ -380,16 +380,16 @@ class FloatingIpTest(test.TestCase):
floating_ips = ["10.10.10.10", "10.10.10.11"]
if floating_address not in floating_ips:
raise exception.FloatingIpNotFoundForAddress(
- address=flaoting_address)
+ address=floating_address)
- self.stubs.Set(network.api.API, "associate_floating_ip",
- fake_network_api_associate)
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_network_api_associate)
- body = dict(addFloatingIp=dict(address='1.1.1.1'))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._add_floating_ip,
- req, 'test_inst', body)
+ body = dict(addFloatingIp=dict(address='1.1.1.1'))
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._add_floating_ip,
+ req, 'test_inst', body)
def test_floating_ip_disassociate_non_existent_ip(self):
def network_api_get_floating_ip_by_address(self, context,
@@ -400,7 +400,7 @@ class FloatingIpTest(test.TestCase):
address=floating_address)
self.stubs.Set(network.api.API, "get_floating_ip_by_address",
- network_api_get_floating_ip_by_address)
+ network_api_get_floating_ip_by_address)
body = dict(removeFloatingIp=dict(address='1.1.1.1'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
index 1bd47b67a..3a6e5db7c 100644
--- a/nova/tests/api/openstack/compute/contrib/test_services.py
+++ b/nova/tests/api/openstack/compute/contrib/test_services.py
@@ -60,7 +60,7 @@ class FakeRequest(object):
GET = {}
-class FakeRequestWithSevice(object):
+class FakeRequestWithService(object):
environ = {"nova.context": context.get_admin_context()}
GET = {"service": "nova-compute"}
@@ -160,7 +160,7 @@ class ServicesTest(test.TestCase):
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
- req = FakeRequestWithSevice()
+ req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-compute', 'host': 'host1',
diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py
index f0f2f02d5..375355a70 100644
--- a/nova/tests/api/openstack/compute/test_limits.py
+++ b/nova/tests/api/openstack/compute/test_limits.py
@@ -618,7 +618,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
self.app = limits.WsgiLimiter(TEST_LIMITS)
def _request_data(self, verb, path):
- """Get data decribing a limit request verb/path."""
+ """Get data describing a limit request verb/path."""
return jsonutils.dumps({"verb": verb, "path": path})
def _request(self, verb, url, username=None):
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
index 1e992c2a3..71fa9f3f3 100644
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/test_server_metadata.py
@@ -21,6 +21,7 @@ import webob
from nova.api.openstack.compute import server_metadata
from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import vm_states
import nova.db
from nova import exception
from nova.openstack.common import cfg
@@ -75,14 +76,16 @@ def return_server(context, server_id):
return {'id': server_id,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
- 'locked': False}
+ 'locked': False,
+ 'vm_state': vm_states.ACTIVE}
def return_server_by_uuid(context, server_uuid):
return {'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
- 'locked': False}
+ 'locked': False,
+ 'vm_state': vm_states.ACTIVE}
def return_server_nonexistent(context, server_id):
@@ -93,10 +96,9 @@ def fake_change_instance_metadata(self, context, instance, diff):
pass
-class ServerMetaDataTest(test.TestCase):
-
+class BaseTest(test.TestCase):
def setUp(self):
- super(ServerMetaDataTest, self).setUp()
+ super(BaseTest, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(nova.db, 'instance_get', return_server)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
@@ -112,6 +114,9 @@ class ServerMetaDataTest(test.TestCase):
self.uuid = str(uuid.uuid4())
self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
+
+class ServerMetaDataTest(BaseTest):
+
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
@@ -510,3 +515,50 @@ class ServerMetaDataTest(test.TestCase):
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.uuid, data)
+
+
+class BadStateServerMetaDataTest(BaseTest):
+
+ def setUp(self):
+ super(BadStateServerMetaDataTest, self).setUp()
+ self.stubs.Set(nova.db, 'instance_get', self._return_server_in_build)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ self._return_server_in_build_by_uuid)
+ self.stubs.Set(nova.db, 'instance_metadata_delete',
+ delete_server_metadata)
+
+ def test_invalid_state_on_delete(self):
+ req = fakes.HTTPRequest.blank(self.url + '/key2')
+ req.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
+ req, self.uuid, 'key2')
+
+ def test_invalid_state_on_update_metadata(self):
+ self.stubs.Set(nova.db, 'instance_metadata_update',
+ return_create_instance_metadata)
+ req = fakes.HTTPRequest.blank(self.url)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ expected = {
+ 'metadata': {
+ 'key1': 'updatedvalue',
+ 'key29': 'newkey',
+ }
+ }
+ req.body = jsonutils.dumps(expected)
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
+ req, self.uuid, expected)
+
+ def _return_server_in_build(self, context, server_id):
+ return {'id': server_id,
+ 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+ 'name': 'fake',
+ 'locked': False,
+ 'vm_state': vm_states.BUILDING}
+
+ def _return_server_in_build_by_uuid(self, context, server_uuid):
+ return {'id': 1,
+ 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
+ 'name': 'fake',
+ 'locked': False,
+ 'vm_state': vm_states.BUILDING}
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index b69268d2a..af769a6ca 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -835,6 +835,12 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(len(servers), 1)
self.assertEqual(servers[0]['id'], server_uuid)
+ def test_get_servers_with_bad_flavor(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/servers?flavor=abcde')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 0)
+
def test_get_servers_allows_status(self):
server_uuid = str(uuid.uuid4())
@@ -2197,6 +2203,74 @@ class ServersControllerCreateTest(test.TestCase):
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
+ def test_create_instance_with_device_name_not_string(self):
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ bdm = [{'delete_on_termination': 1,
+ 'device_name': 123,
+ 'volume_size': 1,
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_device_name_empty(self):
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ bdm = [{'delete_on_termination': 1,
+ 'device_name': '',
+ 'volume_size': 1,
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_device_name_too_long(self):
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ bdm = [{'delete_on_termination': 1,
+ 'device_name': 'a' * 256,
+ 'volume_size': 1,
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_space_in_device_name(self):
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ bdm = [{'delete_on_termination': 1,
+ 'device_name': 'vd a',
+ 'volume_size': 1,
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
def test_create_instance_with_bdm_delete_on_termination(self):
self.ext_mgr.extensions = {'os-volumes': 'fake'}
bdm = [{'device_name': 'foo1', 'delete_on_termination': 1},
diff --git a/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py b/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py
new file mode 100644
index 000000000..56c3f953e
--- /dev/null
+++ b/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py
@@ -0,0 +1,256 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2011 OpenStack LLC
+# Copyright 2011 Ilya Alekseyev
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import imp
+import os
+import sys
+import tempfile
+import time
+
+from nova import test
+
+from nova.tests.baremetal.db import base as bm_db_base
+
+
+TOPDIR = os.path.normpath(os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ os.pardir,
+ os.pardir,
+ os.pardir))
+BMDH_PATH = os.path.join(TOPDIR, 'bin', 'nova-baremetal-deploy-helper')
+
+sys.dont_write_bytecode = True
+bmdh = imp.load_source('bmdh', BMDH_PATH)
+sys.dont_write_bytecode = False
+
+_PXECONF_DEPLOY = """
+default deploy
+
+label deploy
+kernel deploy_kernel
+append initrd=deploy_ramdisk
+ipappend 3
+
+label boot
+kernel kernel
+append initrd=ramdisk root=${ROOT}
+"""
+
+_PXECONF_BOOT = """
+default boot
+
+label deploy
+kernel deploy_kernel
+append initrd=deploy_ramdisk
+ipappend 3
+
+label boot
+kernel kernel
+append initrd=ramdisk root=UUID=12345678-1234-1234-1234-1234567890abcdef
+"""
+
+
+class WorkerTestCase(bm_db_base.BMDBTestCase):
+ def setUp(self):
+ super(WorkerTestCase, self).setUp()
+ self.worker = bmdh.Worker()
+ # Make tearDown() fast
+ self.worker.queue_timeout = 0.1
+ self.worker.start()
+
+ def tearDown(self):
+ if self.worker.isAlive():
+ self.worker.stop = True
+ self.worker.join(timeout=1)
+ super(WorkerTestCase, self).tearDown()
+
+ def wait_queue_empty(self, timeout):
+ for _ in xrange(int(timeout / 0.1)):
+ if bmdh.QUEUE.empty():
+ break
+ time.sleep(0.1)
+
+ def test_run_calls_deploy(self):
+ """Check all queued requests are passed to deploy()."""
+ history = []
+
+ def fake_deploy(**params):
+ history.append(params)
+
+ self.stubs.Set(bmdh, 'deploy', fake_deploy)
+ params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}]
+ for (dep_id, params) in enumerate(params_list):
+ bmdh.QUEUE.put((dep_id, params))
+ self.wait_queue_empty(1)
+ self.assertEqual(params_list, history)
+
+ def test_run_with_failing_deploy(self):
+ """Check a worker keeps on running even if deploy() raises
+ an exception.
+ """
+ history = []
+
+ def fake_deploy(**params):
+ history.append(params)
+ # always fail
+ raise Exception('test')
+
+ self.stubs.Set(bmdh, 'deploy', fake_deploy)
+ params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}]
+ for (dep_id, params) in enumerate(params_list):
+ bmdh.QUEUE.put((dep_id, params))
+ self.wait_queue_empty(1)
+ self.assertEqual(params_list, history)
+
+
+class PhysicalWorkTestCase(test.TestCase):
+ def setUp(self):
+ super(PhysicalWorkTestCase, self).setUp()
+
+ def noop(*args, **kwargs):
+ pass
+
+ self.stubs.Set(time, 'sleep', noop)
+
+ def test_deploy(self):
+ """Check loosely all functions are called with right args."""
+ address = '127.0.0.1'
+ port = 3306
+ iqn = 'iqn.xyz'
+ lun = 1
+ image_path = '/tmp/xyz/image'
+ pxe_config_path = '/tmp/abc/pxeconfig'
+ root_mb = 128
+ swap_mb = 64
+
+ dev = '/dev/fake'
+ root_part = '/dev/fake-part1'
+ swap_part = '/dev/fake-part2'
+ root_uuid = '12345678-1234-1234-12345678-12345678abcdef'
+
+ self.mox.StubOutWithMock(bmdh, 'get_dev')
+ self.mox.StubOutWithMock(bmdh, 'get_image_mb')
+ self.mox.StubOutWithMock(bmdh, 'discovery')
+ self.mox.StubOutWithMock(bmdh, 'login_iscsi')
+ self.mox.StubOutWithMock(bmdh, 'logout_iscsi')
+ self.mox.StubOutWithMock(bmdh, 'make_partitions')
+ self.mox.StubOutWithMock(bmdh, 'is_block_device')
+ self.mox.StubOutWithMock(bmdh, 'dd')
+ self.mox.StubOutWithMock(bmdh, 'mkswap')
+ self.mox.StubOutWithMock(bmdh, 'block_uuid')
+ self.mox.StubOutWithMock(bmdh, 'switch_pxe_config')
+ self.mox.StubOutWithMock(bmdh, 'notify')
+
+ bmdh.get_dev(address, port, iqn, lun).AndReturn(dev)
+ bmdh.get_image_mb(image_path).AndReturn(1) # < root_mb
+ bmdh.discovery(address, port)
+ bmdh.login_iscsi(address, port, iqn)
+ bmdh.is_block_device(dev).AndReturn(True)
+ bmdh.make_partitions(dev, root_mb, swap_mb)
+ bmdh.is_block_device(root_part).AndReturn(True)
+ bmdh.is_block_device(swap_part).AndReturn(True)
+ bmdh.dd(image_path, root_part)
+ bmdh.mkswap(swap_part)
+ bmdh.block_uuid(root_part).AndReturn(root_uuid)
+ bmdh.logout_iscsi(address, port, iqn)
+ bmdh.switch_pxe_config(pxe_config_path, root_uuid)
+ bmdh.notify(address, 10000)
+ self.mox.ReplayAll()
+
+ bmdh.deploy(address, port, iqn, lun, image_path, pxe_config_path,
+ root_mb, swap_mb)
+
+ def test_always_logout_iscsi(self):
+ """logout_iscsi() must be called once login_iscsi() is called."""
+ address = '127.0.0.1'
+ port = 3306
+ iqn = 'iqn.xyz'
+ lun = 1
+ image_path = '/tmp/xyz/image'
+ pxe_config_path = '/tmp/abc/pxeconfig'
+ root_mb = 128
+ swap_mb = 64
+
+ dev = '/dev/fake'
+
+ self.mox.StubOutWithMock(bmdh, 'get_dev')
+ self.mox.StubOutWithMock(bmdh, 'get_image_mb')
+ self.mox.StubOutWithMock(bmdh, 'discovery')
+ self.mox.StubOutWithMock(bmdh, 'login_iscsi')
+ self.mox.StubOutWithMock(bmdh, 'logout_iscsi')
+ self.mox.StubOutWithMock(bmdh, 'work_on_disk')
+
+ class TestException(Exception):
+ pass
+
+ bmdh.get_dev(address, port, iqn, lun).AndReturn(dev)
+ bmdh.get_image_mb(image_path).AndReturn(1) # < root_mb
+ bmdh.discovery(address, port)
+ bmdh.login_iscsi(address, port, iqn)
+ bmdh.work_on_disk(dev, root_mb, swap_mb, image_path).\
+ AndRaise(TestException)
+ bmdh.logout_iscsi(address, port, iqn)
+ self.mox.ReplayAll()
+
+ self.assertRaises(TestException,
+ bmdh.deploy,
+ address, port, iqn, lun, image_path,
+ pxe_config_path, root_mb, swap_mb)
+
+
+class SwitchPxeConfigTestCase(test.TestCase):
+ def setUp(self):
+ super(SwitchPxeConfigTestCase, self).setUp()
+ (fd, self.fname) = tempfile.mkstemp()
+ os.write(fd, _PXECONF_DEPLOY)
+ os.close(fd)
+
+ def tearDown(self):
+ os.unlink(self.fname)
+ super(SwitchPxeConfigTestCase, self).tearDown()
+
+ def test_switch_pxe_config(self):
+ bmdh.switch_pxe_config(self.fname,
+ '12345678-1234-1234-1234-1234567890abcdef')
+ with open(self.fname, 'r') as f:
+ pxeconf = f.read()
+ self.assertEqual(pxeconf, _PXECONF_BOOT)
+
+
+class OtherFunctionTestCase(test.TestCase):
+ def test_get_dev(self):
+ expected = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9'
+ actual = bmdh.get_dev('1.2.3.4', 5678, 'iqn.fake', 9)
+ self.assertEqual(expected, actual)
+
+ def test_get_image_mb(self):
+ mb = 1024 * 1024
+ size = None
+
+ def fake_getsize(path):
+ return size
+
+ self.stubs.Set(os.path, 'getsize', fake_getsize)
+ size = 0
+ self.assertEqual(bmdh.get_image_mb('x'), 0)
+ size = 1
+ self.assertEqual(bmdh.get_image_mb('x'), 1)
+ size = mb
+ self.assertEqual(bmdh.get_image_mb('x'), 1)
+ size = mb + 1
+ self.assertEqual(bmdh.get_image_mb('x'), 2)
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
index 45c9ede43..73ef8caa3 100644
--- a/nova/tests/baremetal/test_pxe.py
+++ b/nova/tests/baremetal/test_pxe.py
@@ -147,12 +147,6 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
- self.assertIn('hwaddress ether fake', config)
- self.assertNotIn('hwaddress ether aa:bb:cc:dd', config)
-
- net[0][1]['mac'] = 'aa:bb:cc:dd'
- config = pxe.build_network_config(net)
- self.assertIn('hwaddress ether aa:bb:cc:dd', config)
net = utils.get_test_network_info(2)
config = pxe.build_network_config(net)
@@ -254,6 +248,13 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
pxe.get_tftp_image_info,
self.instance)
+ # Test that other non-true values also raise an exception
+ CONF.baremetal.deploy_kernel = ""
+ CONF.baremetal.deploy_ramdisk = ""
+ self.assertRaises(exception.NovaException,
+ pxe.get_tftp_image_info,
+ self.instance)
+
# Even if the instance includes kernel_id and ramdisk_id,
# we still need deploy_kernel_id and deploy_ramdisk_id.
# If those aren't present in instance[], and not specified in
@@ -295,6 +296,17 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
self.assertEqual(res['deploy_kernel'][0], 'eeee')
self.assertEqual(res['deploy_ramdisk'][0], 'ffff')
+ # However, if invalid values are passed on the image extra_specs,
+ # this should still raise an exception.
+ extra_specs = {
+ 'deploy_kernel_id': '',
+ 'deploy_ramdisk_id': '',
+ }
+ self.instance['extra_specs'] = extra_specs
+ self.assertRaises(exception.NovaException,
+ pxe.get_tftp_image_info,
+ self.instance)
+
class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
@@ -306,15 +318,6 @@ class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(macs, address_list)
- def test_generate_udev_rules(self):
- self._create_node()
- address_list = [nic['address'] for nic in self.nic_info]
- address_list.append(self.node_info['prov_mac_address'])
-
- rules = self.driver._generate_udev_rules(self.context, self.node)
- for address in address_list:
- self.assertIn('ATTR{address}=="%s"' % address, rules)
-
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
@@ -357,8 +360,6 @@ class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
# nova.virt.disk.api._inject_*_into_fs
self._create_node()
files = []
- files.append(('/etc/udev/rules.d/70-persistent-net.rules',
- self.driver._generate_udev_rules(self.context, self.node)))
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 3740d598e..b8212848c 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -2631,8 +2631,6 @@ class ComputeTestCase(BaseTestCase):
'setup_networks_on_host')
self.mox.StubOutWithMock(self.compute.network_api,
'migrate_instance_finish')
- self.mox.StubOutWithMock(self.compute.driver,
- 'post_live_migration_at_destination')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute, '_instance_update')
@@ -2650,10 +2648,12 @@ class ComputeTestCase(BaseTestCase):
self.compute.network_api.migrate_instance_finish(admin_ctxt,
instance, migration)
fake_net_info = []
+ fake_block_dev_info = {'foo': 'bar'}
self.compute.driver.post_live_migration_at_destination(admin_ctxt,
- instance,
- fake_net_info,
- False)
+ instance,
+ fake_net_info,
+ False,
+ fake_block_dev_info)
self.compute._get_power_state(admin_ctxt, instance).AndReturn(
'fake_power_state')
@@ -2698,8 +2698,8 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(task_states.POWERING_OFF, instances[0]['task_state'])
def test_add_instance_fault(self):
+ instance = self._create_fake_instance()
exc_info = None
- instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
self.assertTrue(values['details'].startswith('test'))
@@ -2709,7 +2709,8 @@ class ComputeTestCase(BaseTestCase):
expected = {
'code': 500,
'message': 'NotImplementedError',
- 'instance_uuid': instance_uuid,
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
}
self.assertEquals(expected, values)
@@ -2721,13 +2722,12 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid,
- NotImplementedError('test'),
- exc_info)
+ compute_utils.add_instance_fault_from_exc(ctxt, instance,
+ NotImplementedError('test'), exc_info)
def test_add_instance_fault_with_remote_error(self):
+ instance = self._create_fake_instance()
exc_info = None
- instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
self.assertTrue(values['details'].startswith('Remote error'))
@@ -2737,8 +2737,9 @@ class ComputeTestCase(BaseTestCase):
expected = {
'code': 500,
- 'instance_uuid': instance_uuid,
- 'message': 'My Test Message'
+ 'instance_uuid': instance['uuid'],
+ 'message': 'My Test Message',
+ 'host': self.compute.host
}
self.assertEquals(expected, values)
@@ -2750,13 +2751,12 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid,
- exc,
- exc_info)
+ compute_utils.add_instance_fault_from_exc(ctxt, instance, exc,
+ exc_info)
def test_add_instance_fault_user_error(self):
+ instance = self._create_fake_instance()
exc_info = None
- instance_uuid = str(uuid.uuid4())
def fake_db_fault_create(ctxt, values):
@@ -2764,7 +2764,8 @@ class ComputeTestCase(BaseTestCase):
'code': 400,
'message': 'Invalid',
'details': 'fake details',
- 'instance_uuid': instance_uuid,
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
}
self.assertEquals(expected, values)
@@ -2778,26 +2779,27 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid,
- user_exc, exc_info)
+ compute_utils.add_instance_fault_from_exc(ctxt, instance, user_exc,
+ exc_info)
def test_add_instance_fault_no_exc_info(self):
- instance_uuid = str(uuid.uuid4())
+ instance = self._create_fake_instance()
def fake_db_fault_create(ctxt, values):
expected = {
'code': 500,
'message': 'NotImplementedError',
'details': 'test',
- 'instance_uuid': instance_uuid,
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
}
self.assertEquals(expected, values)
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt, instance_uuid,
- NotImplementedError('test'))
+ compute_utils.add_instance_fault_from_exc(ctxt, instance,
+ NotImplementedError('test'))
def test_cleanup_running_deleted_instances(self):
admin_context = context.get_admin_context()
@@ -2959,7 +2961,7 @@ class ComputeTestCase(BaseTestCase):
call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(call_info['get_all_by_host'], 2)
- # Stays the same, beacuse the instance came from the DB
+ # Stays the same, because the instance came from the DB
self.assertEqual(call_info['get_by_uuid'], 3)
self.assertEqual(call_info['get_nw_info'], 4)
@@ -3304,6 +3306,35 @@ class ComputeTestCase(BaseTestCase):
self.mox.VerifyAll()
self.mox.UnsetStubs()
+ def test_init_instance_failed_resume_sets_error(self):
+ instance = {
+ 'uuid': 'fake-uuid',
+ 'info_cache': None,
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE,
+ }
+ self.flags(resume_guests_state_on_host_boot=True)
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'resume_state_on_host_boot')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_volume_block_device_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_set_instance_error_state')
+ self.compute._get_power_state(mox.IgnoreArg(),
+ instance).AndReturn(power_state.SHUTDOWN)
+ self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
+ self.compute._get_instance_volume_block_device_info(mox.IgnoreArg(),
+ instance['uuid']).AndReturn('fake-bdm')
+ self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
+ instance, mox.IgnoreArg(),
+ 'fake-bdm').AndRaise(test.TestingException)
+ self.compute._set_instance_error_state(mox.IgnoreArg(),
+ instance['uuid'])
+ self.mox.ReplayAll()
+ self.compute._init_instance('fake-context', instance)
+
def test_get_instances_on_driver(self):
fake_context = context.get_admin_context()
@@ -3956,6 +3987,38 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_delete_in_resizing(self):
+ def fake_quotas_reserve(context, expire=None, project_id=None,
+ **deltas):
+ old_type = instance_types.get_instance_type_by_name('m1.tiny')
+ # ensure using old instance type to create reservations
+ self.assertEqual(deltas['cores'], -old_type['vcpus'])
+ self.assertEqual(deltas['ram'], -old_type['memory_mb'])
+
+ self.stubs.Set(QUOTAS, 'reserve', fake_quotas_reserve)
+
+ instance, instance_uuid = self._run_instance(params={
+ 'host': CONF.host})
+
+ # create a fake migration record (manager does this)
+ new_inst_type = instance_types.get_instance_type_by_name('m1.small')
+ db.migration_create(self.context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'old_instance_type_id': instance['instance_type_id'],
+ 'new_instance_type_id': new_inst_type['id'],
+ 'status': 'post-migrating'})
+
+ # update instance type to resized one
+ db.instance_update(self.context, instance['uuid'],
+ {'instance_type_id': new_inst_type['id'],
+ 'vcpus': new_inst_type['vcpus'],
+ 'memory_mb': new_inst_type['memory_mb'],
+ 'task_state': task_states.RESIZE_FINISH})
+
+ self.compute_api.delete(self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_delete_in_resized(self):
instance, instance_uuid = self._run_instance(params={
'host': CONF.host})
@@ -5192,14 +5255,14 @@ class ComputeAPITestCase(BaseTestCase):
self.assertTrue(instance3['uuid'] in instance_uuids)
self.assertTrue(instance4['uuid'] in instance_uuids)
- # multiple criterias as a dict
+ # multiple criteria as a dict
instances = self.compute_api.get_all(c,
search_opts={'metadata': {'key3': 'value3',
'key4': 'value4'}})
self.assertEqual(len(instances), 1)
self.assertEqual(instances[0]['uuid'], instance4['uuid'])
- # multiple criterias as a list
+ # multiple criteria as a list
instances = self.compute_api.get_all(c,
search_opts={'metadata': [{'key4': 'value4'},
{'key3': 'value3'}]})
@@ -5271,6 +5334,24 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(_context, instance['uuid'])
+ def test_disallow_metadata_changes_during_building(self):
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ pass
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ instance = self._create_fake_instance({'vm_state': vm_states.BUILDING})
+ instance = dict(instance)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.delete_instance_metadata, self.context,
+ instance, "key")
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.update_instance_metadata, self.context,
+ instance, "key")
+
def test_get_instance_faults(self):
# Get an instances latest fault.
instance = self._create_fake_instance()
@@ -6367,7 +6448,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
"""
Some instance-types are marked 'disabled' which means that they will not
show up in customer-facing listings. We do, however, want those
- instance-types to be availble for emergency migrations and for rebuilding
+ instance-types to be available for emergency migrations and for rebuilding
of existing instances.
One legitimate use of the 'disabled' field would be when phasing out a
@@ -6617,7 +6698,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
- instance_uuid, exc_info[0], exc_info=exc_info)
+ self.instance, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance).AndRaise(InnerTestingException("Error"))
self.compute._log_original_error(exc_info, instance_uuid)
@@ -6667,7 +6748,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
except Exception:
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
- instance_uuid, exc_info[0], exc_info=exc_info)
+ self.instance, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance)
self.compute._reschedule(self.context, None, {}, instance_uuid,
@@ -6695,7 +6776,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
- instance_uuid, exc_info[0], exc_info=exc_info)
+ self.instance, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance)
self.compute._reschedule(self.context, None, {}, instance_uuid,
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index f29c68627..6e7227d4c 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -69,8 +69,11 @@ class ComputeValidateDeviceTestCase(test.TestCase):
lambda context, instance: self.data)
def _validate_device(self, device=None):
+ bdms = db.block_device_mapping_get_all_by_instance(
+ self.context, self.instance['uuid'])
return compute_utils.get_device_name_for_instance(self.context,
self.instance,
+ bdms,
device)
@staticmethod
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index b29db92e7..30d176bbd 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -335,14 +335,23 @@ class _BaseTestCase(object):
def test_instance_get_active_by_window(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
- db.instance_get_active_by_window_joined(self.context, 'fake-begin',
- 'fake-end', 'fake-proj',
- 'fake-host')
+ db.instance_get_active_by_window(self.context, 'fake-begin',
+ 'fake-end', 'fake-proj',
+ 'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window(self.context,
'fake-begin', 'fake-end',
'fake-proj', 'fake-host')
+ def test_instance_get_active_by_window_joined(self):
+ self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
+ db.instance_get_active_by_window_joined(self.context, 'fake-begin',
+ 'fake-end', 'fake-proj',
+ 'fake-host')
+ self.mox.ReplayAll()
+ self.conductor.instance_get_active_by_window_joined(
+ self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
+
def test_instance_destroy(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
db.instance_destroy(self.context, 'fake-uuid')
@@ -747,6 +756,14 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
+ def test_service_update(self):
+ ctxt = self.context
+ self.mox.StubOutWithMock(db, 'service_update')
+ db.service_update(ctxt, '', {}).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.service_update(self.context, {'id': ''}, {})
+ self.assertEqual(result, 'fake-result')
+
def test_instance_get_all_by_host(self):
self._test_stubbed('instance_get_all_by_host',
self.context.elevated(), 'host')
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 7c13796a6..9dd9e5121 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -17,7 +17,10 @@
import datetime
+import filecmp
+import os
import random
+import tempfile
import time
import glanceclient.exc
@@ -468,6 +471,40 @@ class TestGlanceImageService(test.TestCase):
self.flags(glance_num_retries=1)
service.download(self.context, image_id, writer)
+ def test_download_file_url(self):
+ class MyGlanceStubClient(glance_stubs.StubGlanceClient):
+ """A client that returns a file url."""
+
+ (outfd, s_tmpfname) = tempfile.mkstemp(prefix='directURLsrc')
+ outf = os.fdopen(outfd, 'w')
+ inf = open('/dev/urandom', 'r')
+ for i in range(10):
+ _data = inf.read(1024)
+ outf.write(_data)
+ outf.close()
+
+ def get(self, image_id):
+ return type('GlanceTestDirectUrlMeta', (object,),
+                            {'direct_url': 'file://%s' % self.s_tmpfname})
+
+ client = MyGlanceStubClient()
+ (outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst')
+ writer = os.fdopen(outfd, 'w')
+
+ service = self._create_image_service(client)
+ image_id = 1 # doesn't matter
+
+ self.flags(allowed_direct_url_schemes=['file'])
+ service.download(self.context, image_id, writer)
+ writer.close()
+
+ # compare the two files
+ rc = filecmp.cmp(tmpfname, client.s_tmpfname)
+ self.assertTrue(rc, "The file %s and %s should be the same" %
+ (tmpfname, client.s_tmpfname))
+ os.remove(client.s_tmpfname)
+ os.remove(tmpfname)
+
def test_client_forbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a Forbidden exception."""
diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py
index 4f8790cc7..0afe397a2 100644
--- a/nova/tests/image/test_s3.py
+++ b/nova/tests/image/test_s3.py
@@ -129,7 +129,7 @@ class TestS3ImageService(test.TestCase):
'snapshot_id': 'snap-12345678',
'delete_on_termination': True},
{'device_name': '/dev/sda2',
- 'virutal_name': 'ephemeral0'},
+ 'virtual_name': 'ephemeral0'},
{'device_name': '/dev/sdb0',
'no_device': True}]}}
_manifest, image, image_uuid = self.image_service._s3_parse_manifest(
@@ -156,7 +156,7 @@ class TestS3ImageService(test.TestCase):
'snapshot_id': 'snap-12345678',
'delete_on_termination': True},
{'device_name': '/dev/sda2',
- 'virutal_name': 'ephemeral0'},
+ 'virtual_name': 'ephemeral0'},
{'device_name': '/dev/sdb0',
'no_device': True}]
self.assertEqual(block_device_mapping, expected_bdm)
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index aa41a8259..f101da243 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -171,23 +171,32 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
if not isinstance(result, list):
raise NoMatch(
_('Result: %(result)s is not a list.') % locals())
- if len(expected) != len(result):
- raise NoMatch(
- _('Length mismatch: %(result)s\n%(expected)s.')
- % locals())
+
+ expected = expected[:]
+ extra = []
for res_obj in result:
- for ex_obj in expected:
+ for i, ex_obj in enumerate(expected):
try:
- res = self._compare_result(subs, ex_obj, res_obj)
+ matched_value = self._compare_result(subs, ex_obj,
+ res_obj)
+ del expected[i]
break
except NoMatch:
pass
else:
- raise NoMatch(
- _('Result: %(res_obj)s not in %(expected)s.')
- % locals())
- matched_value = res or matched_value
+ extra.append(res_obj)
+
+ error = []
+ if expected:
+ error.append(_('Extra items in expected:'))
+ error.extend([repr(o) for o in expected])
+
+ if extra:
+ error.append(_('Extra items in result:'))
+ error.extend([repr(o) for o in extra])
+ if error:
+ raise NoMatch('\n'.join(error))
elif isinstance(expected, basestring) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
@@ -672,7 +681,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('images-details-get-resp', subs, response)
def test_image_metadata_get(self):
- # Get api sample of a image metadata request.
+ # Get api sample of an image metadata request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s/metadata' % image_id)
subs = self._get_regexes()
@@ -701,7 +710,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
subs, response)
def test_image_meta_key_get(self):
- # Get api sample of a image metadata key request.
+ # Get api sample of an image metadata key request.
image_id = fake.get_valid_image_id()
key = "kernel_id"
response = self._do_get('images/%s/metadata/%s' % (image_id, key))
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index c0770902d..8a7865b83 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -469,13 +469,9 @@ class LinuxNetworkTestCase(test.TestCase):
'--arp-ip-src', dhcp, '-j', 'DROP'),
('ebtables', '-I', 'OUTPUT', '-p', 'ARP', '-o', iface,
'--arp-ip-src', dhcp, '-j', 'DROP'),
- ('iptables-save', '-c', '-t', 'filter'),
+ ('iptables-save', '-c'),
('iptables-restore', '-c'),
- ('iptables-save', '-c', '-t', 'mangle'),
- ('iptables-restore', '-c'),
- ('iptables-save', '-c', '-t', 'nat'),
- ('iptables-restore', '-c'),
- ('ip6tables-save', '-c', '-t', 'filter'),
+ ('ip6tables-save', '-c'),
('ip6tables-restore', '-c'),
]
self.assertEqual(executes, expected)
@@ -508,13 +504,9 @@ class LinuxNetworkTestCase(test.TestCase):
'--arp-ip-dst', dhcp, '-j', 'DROP'),
('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface,
'--arp-ip-src', dhcp, '-j', 'DROP'),
- ('iptables-save', '-c', '-t', 'filter'),
- ('iptables-restore', '-c'),
- ('iptables-save', '-c', '-t', 'mangle'),
- ('iptables-restore', '-c'),
- ('iptables-save', '-c', '-t', 'nat'),
+ ('iptables-save', '-c'),
('iptables-restore', '-c'),
- ('ip6tables-save', '-c', '-t', 'filter'),
+ ('ip6tables-save', '-c'),
('ip6tables-restore', '-c'),
]
self.assertEqual(executes, expected)
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 1552630fb..b5b3ec107 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -16,8 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import shutil
-import tempfile
+import fixtures
import mox
from nova import context
@@ -142,7 +142,7 @@ vifs = [{'id': 0,
class FlatNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
- self.tempdir = tempfile.mkdtemp()
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
@@ -150,10 +150,6 @@ class FlatNetworkTestCase(test.TestCase):
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
- def tearDown(self):
- shutil.rmtree(self.tempdir)
- super(FlatNetworkTestCase, self).tearDown()
-
def test_get_instance_nw_info(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
@@ -1629,7 +1625,7 @@ class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
- self.tempdir = tempfile.mkdtemp()
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
self.network.db = db
@@ -1637,10 +1633,6 @@ class FloatingIPTestCase(test.TestCase):
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
- def tearDown(self):
- shutil.rmtree(self.tempdir)
- super(FloatingIPTestCase, self).tearDown()
-
def test_disassociate_floating_ip_multi_host_calls(self):
floating_ip = {
'fixed_ip_id': 12
@@ -2128,7 +2120,7 @@ class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
- self.tempdir = tempfile.mkdtemp()
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.network = TestFloatingIPManager()
self.network.db = db
@@ -2136,10 +2128,6 @@ class InstanceDNSTestCase(test.TestCase):
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
- def tearDown(self):
- shutil.rmtree(self.tempdir)
- super(InstanceDNSTestCase, self).tearDown()
-
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 876bce90d..c9b2e43b3 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -349,6 +349,9 @@ class TestQuantumv2(test.TestCase):
nets = self.nets[net_idx - 1]
ports = {}
fixed_ips = {}
+ macs = kwargs.get('macs')
+ if macs:
+ macs = set(macs)
req_net_ids = []
if 'requested_networks' in kwargs:
for id, fixed_ip, port_id in kwargs['requested_networks']:
@@ -359,13 +362,15 @@ class TestQuantumv2(test.TestCase):
'mac_address': 'my_mac1'}})
ports['my_netid1'] = self.port_data1[0]
id = 'my_netid1'
+ if macs is not None:
+ macs.discard('my_mac1')
else:
fixed_ips[id] = fixed_ip
req_net_ids.append(id)
expected_network_order = req_net_ids
else:
expected_network_order = [n['id'] for n in nets]
- if kwargs.get('_break_list_networks'):
+ if kwargs.get('_break') == 'pre_list_networks':
self.mox.ReplayAll()
return api
search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
@@ -382,8 +387,10 @@ class TestQuantumv2(test.TestCase):
mox_list_network_params['id'] = mox.SameElementsAs(search_ids)
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': []})
-
for net_id in expected_network_order:
+ if kwargs.get('_break') == 'net_id2':
+ self.mox.ReplayAll()
+ return api
port_req_body = {
'port': {
'device_id': self.instance['uuid'],
@@ -406,10 +413,15 @@ class TestQuantumv2(test.TestCase):
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
self.instance['project_id']
+ if macs:
+ port_req_body['port']['mac_address'] = macs.pop()
res_port = {'port': {'id': 'fake'}}
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn(res_port)
+ if kwargs.get('_break') == 'pre_get_instance_nw_info':
+ self.mox.ReplayAll()
+ return api
api.get_instance_nw_info(mox.IgnoreArg(),
self.instance,
networks=nets).AndReturn(None)
@@ -433,16 +445,63 @@ class TestQuantumv2(test.TestCase):
self._allocate_for_instance(1, macs=None)
def test_allocate_for_instance_accepts_macs_kwargs_set(self):
- # The macs kwarg should be accepted, as a set.
+ # The macs kwarg should be accepted, as a set, the
+ # _allocate_for_instance helper checks that the mac is used to create a
+ # port.
self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
+ def test_allocate_for_instance_not_enough_macs_via_ports(self):
+ # using a hypervisor MAC via a pre-created port will stop it being
+ # used to dynamically create a port on a network. We put the network
+ # first in requested_networks so that if the code were to not pre-check
+ # requested ports, it would incorrectly assign the mac and not fail.
+ requested_networks = [
+ (self.nets2[1]['id'], None, None),
+ (None, None, 'my_portid1')]
+ api = self._stub_allocate_for_instance(
+ net_idx=2, requested_networks=requested_networks,
+ macs=set(['my_mac1']),
+ _break='net_id2')
+ self.assertRaises(exception.PortNotFree,
+ api.allocate_for_instance, self.context,
+ self.instance, requested_networks=requested_networks,
+ macs=set(['my_mac1']))
+
+ def test_allocate_for_instance_not_enough_macs(self):
+ # If not enough MAC addresses are available to allocate to networks, an
+ # error should be raised.
+ # We could pass in macs=set(), but that wouldn't tell us that
+ # allocate_for_instance tracks used macs properly, so we pass in one
+ # mac, and ask for two networks.
+ requested_networks = [
+ (self.nets2[1]['id'], None, None),
+ (self.nets2[0]['id'], None, None)]
+ api = self._stub_allocate_for_instance(
+ net_idx=2, requested_networks=requested_networks,
+ macs=set(['my_mac2']),
+ _break='pre_get_instance_nw_info')
+ self.assertRaises(exception.PortNotFree,
+ api.allocate_for_instance, self.context,
+ self.instance, requested_networks=requested_networks,
+ macs=set(['my_mac2']))
+
+ def test_allocate_for_instance_two_macs_two_networks(self):
+ # If two MACs are available and two networks requested, two new ports
+ # get made and no exceptions raised.
+ requested_networks = [
+ (self.nets2[1]['id'], None, None),
+ (self.nets2[0]['id'], None, None)]
+ self._allocate_for_instance(
+ net_idx=2, requested_networks=requested_networks,
+ macs=set(['my_mac2', 'my_mac1']))
+
def test_allocate_for_instance_mac_conflicting_requested_port(self):
# specify only first and last network
requested_networks = [(None, None, 'my_portid1')]
api = self._stub_allocate_for_instance(
net_idx=1, requested_networks=requested_networks,
macs=set(['unknown:mac']),
- _break_list_networks=True)
+ _break='pre_list_networks')
self.assertRaises(exception.PortNotUsable,
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks,
@@ -943,6 +1002,54 @@ class TestQuantumv2(test.TestCase):
self.mox.ReplayAll()
api.disassociate_floating_ip(self.context, self.instance, address)
+ def test_add_fixed_ip_to_instance(self):
+ api = quantumapi.API()
+ network_id = 'my_netid1'
+ search_opts = {'network_id': network_id}
+ self.moxed_client.list_subnets(
+ **search_opts).AndReturn({'subnets': self.subnet_data1})
+
+ zone = 'compute:%s' % self.instance['availability_zone']
+ search_opts = {'device_id': self.instance['uuid'],
+ 'device_owner': 'compute:nova',
+ 'network_id': network_id}
+ self.moxed_client.list_ports(
+ **search_opts).AndReturn({'ports': self.port_data1})
+ port_req_body = {
+ 'port': {
+ 'fixed_ips': [{'subnet_id': 'my_subid1'}],
+ },
+ }
+ port = self.port_data1[0]
+ port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
+ self.moxed_client.update_port('my_portid1',
+ MyComparator(port_req_body)).AndReturn({'port': port})
+
+ self.mox.ReplayAll()
+ api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
+
+ def test_remove_fixed_ip_from_instance(self):
+ api = quantumapi.API()
+ address = '10.0.0.3'
+ zone = 'compute:%s' % self.instance['availability_zone']
+ search_opts = {'device_id': self.instance['uuid'],
+ 'device_owner': zone,
+ 'fixed_ips': 'ip_address=%s' % address}
+ self.moxed_client.list_ports(
+ **search_opts).AndReturn({'ports': self.port_data1})
+ port_req_body = {
+ 'port': {
+ 'fixed_ips': [],
+ },
+ }
+ port = self.port_data1[0]
+ port['fixed_ips'] = []
+ self.moxed_client.update_port('my_portid1',
+ MyComparator(port_req_body)).AndReturn({'port': port})
+
+ self.mox.ReplayAll()
+ api.remove_fixed_ip_from_instance(self.context, self.instance, address)
+
class TestQuantumv2ModuleMethods(test.TestCase):
def test_ensure_requested_network_ordering_no_preference(self):
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index 90bffeeaf..5ba7459fb 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -108,8 +108,9 @@ class NetworkRpcAPITestCase(test.TestCase):
def test_get_floating_ip(self):
self._test_network_api('get_floating_ip', rpc_method='call', id='id')
- def test_get_floating_pools(self):
- self._test_network_api('get_floating_pools', rpc_method='call')
+ def test_get_floating_ip_pools(self):
+ self._test_network_api('get_floating_ip_pools', rpc_method='call',
+ version="1.7")
def test_get_floating_ip_by_address(self):
self._test_network_api('get_floating_ip_by_address', rpc_method='call',
diff --git a/nova/tests/scheduler/test_chance_scheduler.py b/nova/tests/scheduler/test_chance_scheduler.py
index 26cde055b..76fba900d 100644
--- a/nova/tests/scheduler/test_chance_scheduler.py
+++ b/nova/tests/scheduler/test_chance_scheduler.py
@@ -130,11 +130,11 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
# instance 1
ctxt.elevated().AndReturn(ctxt_elevated)
self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
- compute_utils.add_instance_fault_from_exc(ctxt,
- uuid, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
- db.instance_update_and_get_original(ctxt, uuid,
+ old_ref, new_ref = db.instance_update_and_get_original(ctxt, uuid,
{'vm_state': vm_states.ERROR,
'task_state': None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
self.driver.schedule_run_instance(
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 5d8e8236b..2bd2cb85b 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -58,11 +58,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ old_ref, new_ref = db.instance_update_and_get_original(fake_context,
+ uuid, {'vm_state': vm_states.ERROR, 'task_state':
+ None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(fake_context,
- uuid, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
- db.instance_update_and_get_original(fake_context, uuid,
- {'vm_state': vm_states.ERROR,
- 'task_state': None}).AndReturn(({}, {}))
+ new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None, None, None, {})
@@ -88,11 +88,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'instance_uuids': [uuid]}
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ old_ref, new_ref = db.instance_update_and_get_original(fake_context,
+ uuid, {'vm_state': vm_states.ERROR, 'task_state':
+ None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(fake_context,
- uuid, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
- db.instance_update_and_get_original(fake_context, uuid,
- {'vm_state': vm_states.ERROR,
- 'task_state': None}).AndReturn(({}, {}))
+ new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None, None, None, {})
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 9f7f189cc..f8b9f9296 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -22,6 +22,7 @@ from nova import context
from nova import db
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
+from nova.openstack.common import timeutils
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
from nova.scheduler.filters.trusted_filter import AttestationService
@@ -233,11 +234,13 @@ class HostFiltersTestCase(test.TestCase):
def fake_oat_request(self, *args, **kwargs):
"""Stubs out the response from OAT service."""
- return httplib.OK, jsonutils.loads(self.oat_data)
+ self.oat_attested = True
+ return httplib.OK, self.oat_data
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.oat_data = ''
+ self.oat_attested = False
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(AttestationService, '_request', self.fake_oat_request)
self.context = context.RequestContext('fake', 'fake')
@@ -1147,54 +1150,121 @@ class HostFiltersTestCase(test.TestCase):
def test_trusted_filter_default_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
- filter_properties = {'instance_type': {'memory_mb': 1024}}
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
- self.oat_data =\
- '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
+ self.oat_data = {"hosts": [{"host_name": "host1",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'trusted'}
- filter_properties = {'instance_type': {'memory_mb': 1024,
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
- self.oat_data =\
- '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
+ self.oat_data = {"hosts": [{"host_name": "host1",
+ "trust_lvl": "untrusted",
+ "vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'trusted'}
- filter_properties = {'instance_type': {'memory_mb': 1024,
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
- self.oat_data =\
- '{"hosts":[{"host_name":"host1","trust_lvl":"trusted"}]}'
+ self.oat_data = {"hosts": [{"host_name": "host1",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
- filter_properties = {'instance_type': {'memory_mb': 1024,
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
- self.oat_data =\
- '{"hosts":[{"host_name":"host1","trust_lvl":"untrusted"}]}'
+ self.oat_data = {"hosts": [{"host_name": "host1",
+ "trust_lvl": "untrusted",
+ "vtime":timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
- filter_properties = {'instance_type': {'memory_mb': 1024,
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ def test_trusted_filter_update_cache(self):
+ self.oat_data = {"hosts": [{"host_name":
+ "host1", "trust_lvl": "untrusted",
+ "vtime": timeutils.isotime()}]}
+
+ filt_cls = self.class_map['TrustedFilter']()
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filt_cls.host_passes(host, filter_properties) # Fill the caches
+
+ self.oat_attested = False
+ filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(self.oat_attested)
+
+ self.oat_attested = False
+
+ timeutils.set_time_override(timeutils.utcnow())
+ timeutils.advance_time_seconds(
+ CONF.trusted_computing.attestation_auth_timeout + 80)
+ filt_cls.host_passes(host, filter_properties)
+ self.assertTrue(self.oat_attested)
+
+ timeutils.clear_time_override()
+
+ def test_trusted_filter_update_cache_timezone(self):
+ self.oat_data = {"hosts": [{"host_name": "host1",
+ "trust_lvl": "untrusted",
+ "vtime": "2012-09-09T05:10:40-04:00"}]}
+
+ filt_cls = self.class_map['TrustedFilter']()
+ extra_specs = {'trust:trusted_host': 'untrusted'}
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ timeutils.set_time_override(
+ timeutils.normalize_time(
+ timeutils.parse_isotime("2012-09-09T09:10:40Z")))
+
+ filt_cls.host_passes(host, filter_properties) # Fill the caches
+
+ self.oat_attested = False
+ filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(self.oat_attested)
+
+ self.oat_attested = False
+ timeutils.advance_time_seconds(
+ CONF.trusted_computing.attestation_auth_timeout - 10)
+ filt_cls.host_passes(host, filter_properties)
+ self.assertFalse(self.oat_attested)
+
+ timeutils.clear_time_override()
+
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index dd5b0ae32..eb4c3864f 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -183,12 +183,12 @@ class SchedulerManagerTestCase(test.TestCase):
self.manager.driver.schedule_run_instance(self.context,
request_spec, None, None, None, None, {}).AndRaise(
exception.NoValidHost(reason=""))
- db.instance_update_and_get_original(self.context, fake_instance_uuid,
+ old, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
{"vm_state": vm_states.ERROR,
"task_state": None}).AndReturn((inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context,
- fake_instance_uuid, mox.IsA(exception.NoValidHost),
- mox.IgnoreArg())
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
self.manager.run_instance(self.context, request_spec,
@@ -217,12 +217,12 @@ class SchedulerManagerTestCase(test.TestCase):
}
self.manager.driver.schedule_prep_resize(**kwargs).AndRaise(
exception.NoValidHost(reason=""))
- db.instance_update_and_get_original(self.context, fake_instance_uuid,
+ old_ref, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
{"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn(
(inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context,
- fake_instance_uuid, mox.IsA(exception.NoValidHost),
- mox.IgnoreArg())
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
self.manager.prep_resize(**kwargs)
@@ -254,12 +254,12 @@ class SchedulerManagerTestCase(test.TestCase):
"vm_state": "",
"task_state": "",
}
- db.instance_update_and_get_original(self.context, fake_instance_uuid,
+ old_ref, new_ref = db.instance_update_and_get_original(self.context,
+ fake_instance_uuid,
{"vm_state": vm_states.ERROR,
"task_state": None}).AndReturn((inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context,
- fake_instance_uuid, mox.IsA(test.TestingException),
- mox.IgnoreArg())
+ compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ mox.IsA(test.TestingException), mox.IgnoreArg())
self.mox.ReplayAll()
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 829a98334..fb2e76e45 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -309,11 +309,10 @@ class ApiEc2TestCase(test.TestCase):
try:
self.ec2.create_key_pair('test')
except boto_exc.EC2ResponseError, e:
- if e.code == 'KeyPairExists':
+ if e.code == 'InvalidKeyPair.Duplicate':
pass
else:
- self.fail("Unexpected EC2ResponseError: %s "
- "(expected KeyPairExists)" % e.code)
+ self.assertEqual('InvalidKeyPair.Duplicate', e.code)
else:
self.fail('Exception not raised.')
diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py
new file mode 100644
index 000000000..2c5c06921
--- /dev/null
+++ b/nova/tests/test_availability_zones.py
@@ -0,0 +1,114 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Netease Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for availability zones
+"""
+
+from nova import availability_zones as az
+from nova import context
+from nova import db
+from nova.openstack.common import cfg
+from nova import service
+from nova import test
+
+CONF = cfg.CONF
+CONF.import_opt('internal_service_availability_zone',
+ 'nova.availability_zones')
+CONF.import_opt('default_availability_zone',
+ 'nova.availability_zones')
+
+
+class AvailabilityZoneTestCases(test.TestCase):
+ """Test case for aggregate based availability zone."""
+
+ def setUp(self):
+ super(AvailabilityZoneTestCases, self).setUp()
+ self.host = 'me'
+ self.availability_zone = 'nova-test'
+ self.default_az = CONF.default_availability_zone
+ self.default_in_az = CONF.internal_service_availability_zone
+ self.context = context.get_admin_context()
+
+ agg = {'name': 'agg1'}
+ self.agg = db.aggregate_create(self.context, agg)
+
+ metadata = {'availability_zone': self.availability_zone}
+ db.aggregate_metadata_add(self.context, self.agg['id'], metadata)
+
+ def tearDown(self):
+ db.aggregate_delete(self.context, self.agg['id'])
+ super(AvailabilityZoneTestCases, self).tearDown()
+
+ def _create_service_with_topic(self, topic):
+ values = {
+ 'binary': 'bin',
+ 'host': self.host,
+ 'topic': topic,
+ }
+ return db.service_create(self.context, values)
+
+ def _destroy_service(self, service):
+ return db.service_destroy(self.context, service['id'])
+
+ def _add_to_aggregate(self, service):
+ return db.aggregate_host_add(self.context,
+ self.agg['id'], service['host'])
+
+ def _delete_from_aggregate(self, service):
+ return db.aggregate_host_delete(self.context,
+ self.agg['id'], service['host'])
+
+ def test_set_availability_zone_compute_service(self):
+ """Test for compute service get right availability zone."""
+ service = self._create_service_with_topic('compute')
+ services = db.service_get_all(self.context)
+
+        # The service has not been added to the aggregate, so confirm it
+        # falls back to the default availability zone.
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEquals(new_service['availability_zone'],
+ self.default_az)
+
+        # The service has been added to the aggregate, so confirm it returns
+        # the aggregate's availability zone.
+ self._add_to_aggregate(service)
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEquals(new_service['availability_zone'],
+ self.availability_zone)
+
+ self._destroy_service(service)
+
+ def test_set_availability_zone_not_compute_service(self):
+ """Test not compute service get right availability zone."""
+ service = self._create_service_with_topic('network')
+ services = db.service_get_all(self.context)
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEquals(new_service['availability_zone'],
+ self.default_in_az)
+ self._destroy_service(service)
+
+ def test_get_host_availability_zone(self):
+ """Test get right availability zone by given host."""
+ self.assertEquals(self.default_az,
+ az.get_host_availability_zone(self.context, self.host))
+
+ service = self._create_service_with_topic('compute')
+ self._add_to_aggregate(service)
+
+ self.assertEquals(self.availability_zone,
+ az.get_host_availability_zone(self.context, self.host))
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index 29e2e978b..79b5ae66a 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -98,13 +98,14 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
class FakeCinderClient(cinder.cinder_client.Client):
def __init__(self, username, password, project_id=None, auth_url=None,
- retries=None):
+ insecure=False, retries=None):
super(FakeCinderClient, self).__init__(username, password,
project_id=project_id,
auth_url=auth_url,
+ insecure=insecure,
retries=retries)
self.client = FakeHTTPClient(username, password, project_id, auth_url,
- retries=retries)
+ insecure=insecure, retries=retries)
# keep a ref to the clients callstack for factory's assert_called
self.callstack = self.client.callstack = []
@@ -177,6 +178,15 @@ class CinderTestCase(test.TestCase):
self.assertTrue('volume_image_metadata' in volume)
self.assertEqual(volume['volume_image_metadata'], _image_metadata)
+ def test_cinder_api_insecure(self):
+ # The True/False negation is awkward, but better for the client
+ # to pass us insecure=True and we check verify_cert == False
+ self.flags(cinder_api_insecure=True)
+ volume = self.api.get(self.context, '1234')
+ self.assert_called('GET', '/volumes/1234')
+ self.assertEquals(
+ self.fake_client_factory.client.client.verify_cert, False)
+
def test_cinder_http_retries(self):
retries = 42
self.flags(cinder_http_retries=retries)
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index c70e96cdc..e43a32c19 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -261,6 +261,32 @@ class DbApiTestCase(test.TestCase):
res = db.floating_ip_disassociate(ctxt, floating)
self.assertEqual(res, None)
+ def test_fixed_ip_get_by_floating_address(self):
+ ctxt = context.get_admin_context()
+ values = {'address': 'fixed'}
+ fixed = db.fixed_ip_create(ctxt, values)
+ fixed_ip_ref = db.fixed_ip_get_by_address(ctxt, fixed)
+ values = {'address': 'floating',
+ 'fixed_ip_id': fixed_ip_ref['id']}
+ floating = db.floating_ip_create(ctxt, values)
+ fixed_ip_ref = db.fixed_ip_get_by_floating_address(ctxt, floating)
+ self.assertEqual(fixed, fixed_ip_ref['address'])
+
+ def test_floating_ip_get_by_fixed_address(self):
+ ctxt = context.get_admin_context()
+ values = {'address': 'fixed'}
+ fixed = db.fixed_ip_create(ctxt, values)
+ fixed_ip_ref = db.fixed_ip_get_by_address(ctxt, fixed)
+ values = {'address': 'floating1',
+ 'fixed_ip_id': fixed_ip_ref['id']}
+ floating1 = db.floating_ip_create(ctxt, values)
+ values = {'address': 'floating2',
+ 'fixed_ip_id': fixed_ip_ref['id']}
+ floating2 = db.floating_ip_create(ctxt, values)
+ floating_ip_refs = db.floating_ip_get_by_fixed_address(ctxt, fixed)
+ self.assertEqual(floating1, floating_ip_refs[0]['address'])
+ self.assertEqual(floating2, floating_ip_refs[1]['address'])
+
def test_network_create_safe(self):
ctxt = context.get_admin_context()
values = {'host': 'localhost', 'project_id': 'project1'}
diff --git a/nova/tests/test_driver.py b/nova/tests/test_driver.py
new file mode 100644
index 000000000..2dee7725f
--- /dev/null
+++ b/nova/tests/test_driver.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Citrix Systems, Inc.
+# Copyright 2013 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.virt import driver
+
+
+class FakeDriver(object):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class FakeDriver2(FakeDriver):
+ pass
+
+
+class ToDriverRegistryTestCase(test.TestCase):
+
+ def assertDriverInstance(self, inst, class_, *args, **kwargs):
+ self.assertEquals(class_, inst.__class__)
+ self.assertEquals(args, inst.args)
+ self.assertEquals(kwargs, inst.kwargs)
+
+ def test_driver_dict_from_config(self):
+ drvs = driver.driver_dict_from_config(
+ [
+ 'key1=nova.tests.test_driver.FakeDriver',
+ 'key2=nova.tests.test_driver.FakeDriver2',
+ ], 'arg1', 'arg2', param1='value1', param2='value2'
+ )
+
+ self.assertEquals(
+ sorted(['key1', 'key2']),
+ sorted(drvs.keys())
+ )
+
+ self.assertDriverInstance(
+ drvs['key1'],
+ FakeDriver, 'arg1', 'arg2', param1='value1',
+ param2='value2')
+
+ self.assertDriverInstance(
+ drvs['key2'],
+ FakeDriver2, 'arg1', 'arg2', param1='value1',
+ param2='value2')
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index a9865cb44..495e7c947 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -273,7 +273,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
cmd = ('dd', 'if=%s' % self.TEMPLATE_PATH,
'of=%s' % self.PATH, 'bs=4M')
self.utils.execute(*cmd, run_as_root=True)
- self.disk.resize2fs(self.PATH)
+ self.disk.resize2fs(self.PATH, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index eaf244c56..8142312b9 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -721,7 +721,7 @@ class ImageCacheManagerTestCase(test.TestCase):
def fq_path(path):
return os.path.join('/instance_path/_base/', path)
- # Fake base directory existance
+ # Fake base directory existence
orig_exists = os.path.exists
def exists(path):
@@ -747,7 +747,7 @@ class ImageCacheManagerTestCase(test.TestCase):
'/instance_path/_base/%s_sm' % hashed_42]:
return False
- self.fail('Unexpected path existance check: %s' % path)
+ self.fail('Unexpected path existence check: %s' % path)
self.stubs.Set(os.path, 'exists', lambda x: exists(x))
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 4a136cf13..b70b96b7f 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -142,6 +142,67 @@ class InstanceTypeTestCase(test.TestCase):
self.assertRaises(exception.InvalidInput, instance_types.create,
name, 256, 1, 120, 100, flavorid)
+ def test_add_instance_type_access(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'flavor1'
+ type_ref = instance_types.create('some flavor', 256, 1, 120, 100,
+ flavorid=flavor_id)
+ access_ref = instance_types.add_instance_type_access(flavor_id,
+ project_id,
+ ctxt=ctxt)
+ self.assertEqual(access_ref["project_id"], project_id)
+ self.assertEqual(access_ref["instance_type_id"], type_ref["id"])
+
+ def test_add_instance_type_access_already_exists(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'flavor1'
+ type_ref = instance_types.create('some flavor', 256, 1, 120, 100,
+ flavorid=flavor_id)
+ access_ref = instance_types.add_instance_type_access(flavor_id,
+ project_id,
+ ctxt=ctxt)
+ self.assertRaises(exception.FlavorAccessExists,
+ instance_types.add_instance_type_access,
+ flavor_id, project_id, ctxt)
+
+ def test_add_instance_type_access_invalid_flavor(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'no_such_flavor'
+ self.assertRaises(exception.FlavorNotFound,
+ instance_types.add_instance_type_access,
+ flavor_id, project_id, ctxt)
+
+ def test_remove_instance_type_access(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'flavor1'
+ it = instance_types
+ type_ref = it.create('some flavor', 256, 1, 120, 100,
+ flavorid=flavor_id)
+ access_ref = it.add_instance_type_access(flavor_id, project_id, ctxt)
+ it.remove_instance_type_access(flavor_id, project_id, ctxt)
+
+ projects = it.get_instance_type_access_by_flavor_id(flavor_id, ctxt)
+ self.assertEqual([], projects)
+
+ def test_remove_instance_type_access_doesnt_exists(self):
+ user_id = 'fake'
+ project_id = 'fake'
+ ctxt = context.RequestContext(user_id, project_id, is_admin=True)
+ flavor_id = 'flavor1'
+ type_ref = instance_types.create('some flavor', 256, 1, 120, 100,
+ flavorid=flavor_id)
+ self.assertRaises(exception.FlavorAccessNotFound,
+ instance_types.remove_instance_type_access,
+ flavor_id, project_id, ctxt=ctxt)
+
def test_get_all_instance_types(self):
# Ensures that all instance types can be retrieved.
session = sql_session.get_session()
diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/test_instance_types_extra_specs.py
index f53840b86..f48c2efe8 100644
--- a/nova/tests/test_instance_types_extra_specs.py
+++ b/nova/tests/test_instance_types_extra_specs.py
@@ -18,6 +18,7 @@ Unit Tests for instance types extra specs code
from nova import context
from nova import db
+from nova import exception
from nova import test
@@ -87,6 +88,13 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
self.flavorid)
self.assertEquals(expected_specs, actual_specs)
+ def test_instance_type_extra_specs_update_with_nonexisting_flavor(self):
+ extra_specs = dict(cpu_arch="x86_64")
+ nonexisting_flavorid = "some_flavor_that_doesnt_exists"
+ self.assertRaises(exception.FlavorNotFound,
+ db.instance_type_extra_specs_update_or_create,
+ self.context, nonexisting_flavorid, extra_specs)
+
def test_instance_type_extra_specs_create(self):
expected_specs = dict(cpu_arch="x86_64",
cpu_model="Nehalem",
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 0abf16801..75e758cde 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -570,7 +570,6 @@ class LibvirtConnTestCase(test.TestCase):
self.context = context.get_admin_context()
self.flags(instances_path='')
self.flags(libvirt_snapshots_directory='')
- self.call_libvirt_dependant_setup = False
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
@@ -769,6 +768,20 @@ class LibvirtConnTestCase(test.TestCase):
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[3].target_dev, 'vdd')
+ def test_get_guest_config_with_configdrive(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ # make configdrive.enabled_for() return True
+ instance_ref['config_drive'] = 'ANY_ID'
+
+ cfg = conn.get_guest_config(instance_ref, [], None, None)
+
+ self.assertEquals(type(cfg.devices[2]),
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEquals(cfg.devices[2].target_dev,
+ conn.default_last_device)
+
def test_get_guest_config_with_vnc(self):
self.flags(libvirt_type='kvm',
vnc_enabled=True,
@@ -2854,11 +2867,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(conn, "_wrapped_conn")
- self.mox.StubOutWithMock(conn._wrapped_conn, "getCapabilities")
+ self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")
- conn._wrapped_conn.getCapabilities().AndRaise(
+ conn._wrapped_conn.getLibVersion().AndRaise(
libvirt.libvirtError("fake failure"))
libvirt.libvirtError.get_error_code().AndReturn(error)
@@ -3086,7 +3099,7 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(conn, 'get_info', fake_get_info)
instance = {"name": "instancename", "id": "instanceid",
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- # NOTE(vish): verifies destory doesn't raise if the instance disappears
+ # NOTE(vish): verifies destroy doesn't raise if the instance disappears
conn._destroy(instance)
def test_available_least_handles_missing(self):
@@ -3693,30 +3706,25 @@ class IptablesFirewallTestCase(test.TestCase):
fake.FakeVirtAPI(),
get_connection=lambda: self.fake_libvirt_connection)
- in_nat_rules = [
+ in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
- ]
-
- in_mangle_rules = [
- '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
- '*mangle',
- ':PREROUTING ACCEPT [241:39722]',
- ':INPUT ACCEPT [230:39282]',
- ':FORWARD ACCEPT [0:0]',
- ':OUTPUT ACCEPT [266:26558]',
- ':POSTROUTING ACCEPT [267:26590]',
- '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
- '--checksum-fill',
- 'COMMIT',
- '# Completed on Tue Dec 18 15:50:25 2012',
- ]
-
- in_filter_rules = [
+ '# Completed on Tue Dec 18 15:50:25 2012',
+ '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
+ '*mangle',
+ ':PREROUTING ACCEPT [241:39722]',
+ ':INPUT ACCEPT [230:39282]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [266:26558]',
+ ':POSTROUTING ACCEPT [267:26590]',
+ '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
+ '--checksum-fill',
+ 'COMMIT',
+ '# Completed on Tue Dec 18 15:50:25 2012',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
@@ -3811,15 +3819,11 @@ class IptablesFirewallTestCase(test.TestCase):
# self.fw.add_instance(instance_ref)
def fake_iptables_execute(*cmd, **kwargs):
process_input = kwargs.get('process_input', None)
- if cmd == ('ip6tables-save', '-c', '-t', 'filter'):
+ if cmd == ('ip6tables-save', '-c'):
return '\n'.join(self.in6_filter_rules), None
- if cmd == ('iptables-save', '-c', '-t', 'filter'):
- return '\n'.join(self.in_filter_rules), None
- if cmd == ('iptables-save', '-c', '-t', 'nat'):
- return '\n'.join(self.in_nat_rules), None
- if cmd == ('iptables-save', '-c', '-t', 'mangle'):
- return '\n'.join(self.in_mangle_rules), None
- if cmd == ('iptables-restore', '-c',):
+ if cmd == ('iptables-save', '-c'):
+ return '\n'.join(self.in_rules), None
+ if cmd == ('iptables-restore', '-c'):
lines = process_input.split('\n')
if '*filter' in lines:
self.out_rules = lines
@@ -3843,7 +3847,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.fw.apply_instance_filter(instance_ref, network_info)
in_rules = filter(lambda l: not l.startswith('#'),
- self.in_filter_rules)
+ self.in_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self.out_rules,
@@ -4590,7 +4594,7 @@ class LibvirtDriverTestCase(test.TestCase):
pass
def fake_to_xml(instance, network_info, image_meta=None, rescue=None,
- block_device_info=None):
+ block_device_info=None, write_to_disk=False):
return ""
def fake_plug_vifs(instance, network_info):
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index abd04a641..a6c150971 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -47,7 +47,7 @@ def _get_connect_string(backend,
passwd="openstack_citest",
database="openstack_citest"):
"""
- Try to get a connection with a very specfic set of values, if we get
+ Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
@@ -195,7 +195,7 @@ class TestMigrations(test.TestCase):
"~/.pgpass && chmod 0600 ~/.pgpass" % locals())
execute_cmd(createpgpass)
# note(boris-42): We must create and drop database, we can't
- # drop database wich we have connected to, so for such
+ # drop database which we have connected to, so for such
# operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
@@ -222,7 +222,7 @@ class TestMigrations(test.TestCase):
if _is_backend_avail('mysql', user="openstack_cifail"):
self.fail("Shouldn't have connected")
- def test_mysql_innodb(self):
+ def test_mysql_opportunistically(self):
# Test that table creation on mysql only builds InnoDB tables
if not _is_backend_avail('mysql'):
self.skipTest("mysql not available")
@@ -233,6 +233,12 @@ class TestMigrations(test.TestCase):
self.engines["mysqlcitest"] = engine
self.test_databases["mysqlcitest"] = connect_string
+        # Test that we end up with InnoDB tables
+ self._check_mysql_innodb(engine)
+ # Test IP transition
+ self._check_mysql_migration_149(engine)
+
+ def _check_mysql_innodb(self, engine):
# build a fully populated mysql database with all the tables
self._reset_databases()
self._walk_versions(engine, False, False)
@@ -298,16 +304,8 @@ class TestMigrations(test.TestCase):
"AND column_name='cidr'").scalar())
connection.close()
- def test_migration_149_mysql(self):
+ def _check_mysql_migration_149(self, engine):
"""Test updating a table with IPAddress columns."""
- if not _have_mysql():
- self.skipTest("mysql not available")
-
- connect_string = _get_connect_string("mysql")
- engine = sqlalchemy.create_engine(connect_string)
- self.engines["mysqlcitest"] = engine
- self.test_databases["mysqlcitest"] = connect_string
-
self._reset_databases()
migration_api.version_control(engine, TestMigrations.REPOSITORY,
migration.INIT_VERSION)
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
deleted file mode 100644
index 1029e0c2c..000000000
--- a/nova/tests/test_nova_rootwrap.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import ConfigParser
-import logging
-import logging.handlers
-import os
-import subprocess
-
-from nova.rootwrap import filters
-from nova.rootwrap import wrapper
-from nova import test
-
-
-class RootwrapTestCase(test.TestCase):
-
- def setUp(self):
- super(RootwrapTestCase, self).setUp()
- self.filters = [
- filters.RegExpFilter("/bin/ls", "root", 'ls', '/[a-z]+'),
- filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"),
- filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'),
- filters.CommandFilter("/nonexistent/cat", "root"),
- filters.CommandFilter("/bin/cat", "root") # Keep this one last
- ]
-
- def test_RegExpFilter_match(self):
- usercmd = ["ls", "/root"]
- filtermatch = wrapper.match_filter(self.filters, usercmd)
- self.assertFalse(filtermatch is None)
- self.assertEqual(filtermatch.get_command(usercmd),
- ["/bin/ls", "/root"])
-
- def test_RegExpFilter_reject(self):
- usercmd = ["ls", "root"]
- self.assertRaises(wrapper.NoFilterMatched,
- wrapper.match_filter, self.filters, usercmd)
-
- def test_missing_command(self):
- valid_but_missing = ["foo_bar_not_exist"]
- invalid = ["foo_bar_not_exist_and_not_matched"]
- self.assertRaises(wrapper.FilterMatchNotExecutable,
- wrapper.match_filter, self.filters, valid_but_missing)
- self.assertRaises(wrapper.NoFilterMatched,
- wrapper.match_filter, self.filters, invalid)
-
- def _test_DnsmasqFilter(self, filter_class, config_file_arg):
- usercmd = ['env', config_file_arg + '=A', 'NETWORK_ID=foobar',
- 'dnsmasq', 'foo']
- f = filter_class("/usr/bin/dnsmasq", "root")
- self.assertTrue(f.match(usercmd))
- self.assertEqual(f.get_command(usercmd), ['/usr/bin/dnsmasq', 'foo'])
- env = f.get_environment(usercmd)
- self.assertEqual(env.get(config_file_arg), 'A')
- self.assertEqual(env.get('NETWORK_ID'), 'foobar')
-
- def test_DnsmasqFilter(self):
- self._test_DnsmasqFilter(filters.DnsmasqFilter, 'CONFIG_FILE')
-
- def test_DeprecatedDnsmasqFilter(self):
- self._test_DnsmasqFilter(filters.DeprecatedDnsmasqFilter, 'FLAGFILE')
-
- def test_KillFilter(self):
- if not os.path.exists("/proc/%d" % os.getpid()):
- self.skipTest("Test requires /proc filesystem (procfs)")
- p = subprocess.Popen(["cat"], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- try:
- f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP")
- f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP")
- usercmd = ['kill', '-ALRM', p.pid]
- # Incorrect signal should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', p.pid]
- # Providing no signal should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- # Providing matching signal should be allowed
- usercmd = ['kill', '-9', p.pid]
- self.assertTrue(f.match(usercmd) or f2.match(usercmd))
-
- f = filters.KillFilter("root", "/bin/cat")
- f2 = filters.KillFilter("root", "/usr/bin/cat")
- usercmd = ['kill', os.getpid()]
- # Our own PID does not match /bin/sleep, so it should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', 999999]
- # Nonexistent PID should fail
- self.assertFalse(f.match(usercmd) or f2.match(usercmd))
- usercmd = ['kill', p.pid]
- # Providing no signal should work
- self.assertTrue(f.match(usercmd) or f2.match(usercmd))
- finally:
- # Terminate the "cat" process and wait for it to finish
- p.terminate()
- p.wait()
-
- def test_KillFilter_no_raise(self):
- # Makes sure ValueError from bug 926412 is gone.
- f = filters.KillFilter("root", "")
- # Providing anything other than kill should be False
- usercmd = ['notkill', 999999]
- self.assertFalse(f.match(usercmd))
- # Providing something that is not a pid should be False
- usercmd = ['kill', 'notapid']
- self.assertFalse(f.match(usercmd))
-
- def test_KillFilter_deleted_exe(self):
- # Makes sure deleted exe's are killed correctly.
- # See bug #967931.
- def fake_readlink(blah):
- return '/bin/commandddddd (deleted)'
-
- f = filters.KillFilter("root", "/bin/commandddddd")
- usercmd = ['kill', 1234]
- # Providing no signal should work
- self.stubs.Set(os, 'readlink', fake_readlink)
- self.assertTrue(f.match(usercmd))
-
- def test_ReadFileFilter(self):
- goodfn = '/good/file.name'
- f = filters.ReadFileFilter(goodfn)
- usercmd = ['cat', '/bad/file']
- self.assertFalse(f.match(['cat', '/bad/file']))
- usercmd = ['cat', goodfn]
- self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn])
- self.assertTrue(f.match(usercmd))
-
- def test_exec_dirs_search(self):
- # This test supposes you have /bin/cat or /usr/bin/cat locally
- f = filters.CommandFilter("cat", "root")
- usercmd = ['cat', '/f']
- self.assertTrue(f.match(usercmd))
- self.assertTrue(f.get_command(usercmd, exec_dirs=['/bin',
- '/usr/bin']) in (['/bin/cat', '/f'], ['/usr/bin/cat', '/f']))
-
- def test_skips(self):
- # Check that all filters are skipped and that the last matches
- usercmd = ["cat", "/"]
- filtermatch = wrapper.match_filter(self.filters, usercmd)
- self.assertTrue(filtermatch is self.filters[-1])
-
- def test_RootwrapConfig(self):
- raw = ConfigParser.RawConfigParser()
-
- # Empty config should raise ConfigParser.Error
- self.assertRaises(ConfigParser.Error, wrapper.RootwrapConfig, raw)
-
- # Check default values
- raw.set('DEFAULT', 'filters_path', '/a,/b')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.filters_path, ['/a', '/b'])
- self.assertEqual(config.exec_dirs, os.environ["PATH"].split(':'))
- self.assertFalse(config.use_syslog)
- self.assertEqual(config.syslog_log_facility,
- logging.handlers.SysLogHandler.LOG_SYSLOG)
- self.assertEqual(config.syslog_log_level, logging.ERROR)
-
- # Check general values
- raw.set('DEFAULT', 'exec_dirs', '/a,/x')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.exec_dirs, ['/a', '/x'])
-
- raw.set('DEFAULT', 'use_syslog', 'oui')
- self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
- raw.set('DEFAULT', 'use_syslog', 'true')
- config = wrapper.RootwrapConfig(raw)
- self.assertTrue(config.use_syslog)
-
- raw.set('DEFAULT', 'syslog_log_facility', 'moo')
- self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
- raw.set('DEFAULT', 'syslog_log_facility', 'local0')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.syslog_log_facility,
- logging.handlers.SysLogHandler.LOG_LOCAL0)
- raw.set('DEFAULT', 'syslog_log_facility', 'LOG_AUTH')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.syslog_log_facility,
- logging.handlers.SysLogHandler.LOG_AUTH)
-
- raw.set('DEFAULT', 'syslog_log_level', 'bar')
- self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
- raw.set('DEFAULT', 'syslog_log_level', 'INFO')
- config = wrapper.RootwrapConfig(raw)
- self.assertEqual(config.syslog_log_level, logging.INFO)
diff --git a/nova/tests/test_pipelib.py b/nova/tests/test_pipelib.py
index 85c2ca2cd..5cd715552 100644
--- a/nova/tests/test_pipelib.py
+++ b/nova/tests/test_pipelib.py
@@ -51,11 +51,11 @@ class PipelibTest(test.TestCase):
def test_setup_security_group(self):
group_name = "%s%s" % (self.project, CONF.vpn_key_suffix)
- # First attemp, does not exist (thus its created)
+ # First attempt, does not exist (thus its created)
res1_group = self.cloudpipe.setup_security_group(self.context)
self.assertEqual(res1_group, group_name)
- # Second attem, it exists in the DB
+ # Second attempt, it exists in the DB
res2_group = self.cloudpipe.setup_security_group(self.context)
self.assertEqual(res1_group, res2_group)
@@ -64,10 +64,10 @@ class PipelibTest(test.TestCase):
with utils.tempdir() as tmpdir:
self.flags(keys_path=tmpdir)
- # First attemp, key does not exist (thus it is generated)
+ # First attempt, key does not exist (thus it is generated)
res1_key = self.cloudpipe.setup_key_pair(self.context)
self.assertEqual(res1_key, key_name)
- # Second attem, it exists in the DB
+ # Second attempt, it exists in the DB
res2_key = self.cloudpipe.setup_key_pair(self.context)
self.assertEqual(res2_key, res1_key)
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 2c46b27bd..9eab72c5b 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -757,3 +757,24 @@ class LastBytesTestCase(test.TestCase):
content = '1234567890'
flo.write(content)
self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
+
+
+class IntLikeTestCase(test.TestCase):
+
+ def test_is_int_like(self):
+ self.assertTrue(utils.is_int_like(1))
+ self.assertTrue(utils.is_int_like("1"))
+ self.assertTrue(utils.is_int_like("514"))
+ self.assertTrue(utils.is_int_like("0"))
+
+ self.assertFalse(utils.is_int_like(1.1))
+ self.assertFalse(utils.is_int_like("1.1"))
+ self.assertFalse(utils.is_int_like("1.1.1"))
+ self.assertFalse(utils.is_int_like(None))
+ self.assertFalse(utils.is_int_like("0."))
+ self.assertFalse(utils.is_int_like("aaaaaa"))
+ self.assertFalse(utils.is_int_like("...."))
+ self.assertFalse(utils.is_int_like("1g"))
+ self.assertFalse(
+ utils.is_int_like("0cc3346e-9fef-4445-abe6-5d2b2690ec64"))
+ self.assertFalse(utils.is_int_like("a1"))
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 0b1c5d0e7..067e28a13 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -1822,16 +1822,31 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
# Consider abstracting common code in a base class for firewall driver testing.
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
- _in_nat_rules = [
+ _in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
- ]
-
- _in_filter_rules = [
+ '# Completed on Mon Dec 6 11:54:13 2010',
+ '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
+ '*mangle',
+ ':INPUT ACCEPT [969615:281627771]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [915599:63811649]',
+ ':nova-block-ipv4 - [0:0]',
+ '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
+ ',ESTABLISHED -j ACCEPT ',
+ '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '[0:0] -A FORWARD -o virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ '[0:0] -A FORWARD -i virbr0 -j REJECT '
+ '--reject-with icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
@@ -1916,7 +1931,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
- self._in_filter_rules)
+ self._in_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self._out_rules,
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 85c85b5e2..fa214b23e 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -208,12 +208,10 @@ class FakeSessionForFirewallTests(FakeSessionForVMTests):
def __init__(self, uri, test_case=None):
super(FakeSessionForFirewallTests, self).__init__(uri)
- if hasattr(test_case, '_in_filter_rules'):
- self._in_filter_rules = test_case._in_filter_rules
+ if hasattr(test_case, '_in_rules'):
+ self._in_rules = test_case._in_rules
if hasattr(test_case, '_in6_filter_rules'):
self._in6_filter_rules = test_case._in6_filter_rules
- if hasattr(test_case, '_in_nat_rules'):
- self._in_nat_rules = test_case._in_nat_rules
self._test_case = test_case
def host_call_plugin(self, _1, _2, plugin, method, args):
@@ -230,12 +228,10 @@ class FakeSessionForFirewallTests(FakeSessionForVMTests):
else:
output = ''
process_input = args.get('process_input', None)
- if cmd == ['ip6tables-save', '-c', '-t', 'filter']:
+ if cmd == ['ip6tables-save', '-c']:
output = '\n'.join(self._in6_filter_rules)
- if cmd == ['iptables-save', '-c', '-t', 'filter']:
- output = '\n'.join(self._in_filter_rules)
- if cmd == ['iptables-save', '-c', '-t', 'nat']:
- output = '\n'.join(self._in_nat_rules)
+ if cmd == ['iptables-save', '-c']:
+ output = '\n'.join(self._in_rules)
if cmd == ['iptables-restore', '-c', ]:
lines = process_input.split('\n')
if '*filter' in lines:
diff --git a/nova/utils.py b/nova/utils.py
index 115791b64..75cba0a7c 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -859,6 +859,14 @@ def bool_from_str(val):
val.lower() == 'y'
+def is_int_like(val):
+ """Check if a value looks like an int."""
+ try:
+ return str(int(val)) == str(val)
+ except Exception:
+ return False
+
+
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not."""
val = str(val).lower()
diff --git a/nova/virt/baremetal/net-dhcp.ubuntu.template b/nova/virt/baremetal/net-dhcp.ubuntu.template
index e8824a88d..34a9e8be7 100644
--- a/nova/virt/baremetal/net-dhcp.ubuntu.template
+++ b/nova/virt/baremetal/net-dhcp.ubuntu.template
@@ -10,9 +10,6 @@ iface lo inet loopback
#for $ifc in $interfaces
auto ${ifc.name}
iface ${ifc.name} inet dhcp
-#if $ifc.hwaddress
- hwaddress ether ${ifc.hwaddress}
-#end if
#if $use_ipv6
iface ${ifc.name} inet6 dhcp
diff --git a/nova/virt/baremetal/net-static.ubuntu.template b/nova/virt/baremetal/net-static.ubuntu.template
index f14f0ce8c..1fe5a1ab8 100644
--- a/nova/virt/baremetal/net-static.ubuntu.template
+++ b/nova/virt/baremetal/net-static.ubuntu.template
@@ -16,9 +16,6 @@ iface ${ifc.name} inet static
#if $ifc.dns
dns-nameservers ${ifc.dns}
#end if
-#if $ifc.hwaddress
- hwaddress ether ${ifc.hwaddress}
-#end if
#if $use_ipv6
iface ${ifc.name} inet6 static
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index b94ac9032..0daac1d46 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -121,7 +121,6 @@ def build_network_config(network_info):
gateway_v6 = mapping['gateway_v6']
interface = {
'name': 'eth%d' % id,
- 'hwaddress': mapping['mac'],
'address': mapping['ips'][0]['ip'],
'gateway': mapping['gateway'],
'netmask': mapping['ips'][0]['netmask'],
@@ -219,7 +218,7 @@ def get_tftp_image_info(instance):
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
- if uuid is None:
+ if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
@@ -238,27 +237,12 @@ class PXE(base.NodeDriver):
super(PXE, self).__init__()
def _collect_mac_addresses(self, context, node):
- macs = []
- macs.append(db.bm_node_get(context, node['id'])['prov_mac_address'])
+ macs = set()
+ macs.add(db.bm_node_get(context, node['id'])['prov_mac_address'])
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
- macs.append(nic['address'])
- macs.sort()
- return macs
-
- def _generate_udev_rules(self, context, node):
- # TODO(deva): fix assumption that device names begin with "eth"
- # and fix assumption of ordering
- macs = self._collect_mac_addresses(context, node)
- rules = ''
- for (i, mac) in enumerate(macs):
- rules += 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ' \
- 'ATTR{address}=="%(mac)s", ATTR{dev_id}=="0x0", ' \
- 'ATTR{type}=="1", KERNEL=="eth*", NAME="%(name)s"\n' \
- % {'mac': mac.lower(),
- 'name': 'eth%d' % i,
- }
- return rules
+ macs.add(nic['address'])
+ return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
@@ -330,9 +314,6 @@ class PXE(base.NodeDriver):
injected_files = []
net_config = build_network_config(network_info)
- udev_rules = self._generate_udev_rules(context, node)
- injected_files.append(
- ('/etc/udev/rules.d/70-persistent-net.rules', udev_rules))
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
@@ -385,7 +366,6 @@ class PXE(base.NodeDriver):
config
./pxelinux.cfg/
{mac} -> ../{uuid}/config
-
"""
image_info = get_tftp_image_info(instance)
(root_mb, swap_mb) = get_partition_sizes(instance)
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index 2e6f82b93..0a05dfedd 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -31,7 +31,7 @@ opts = [
cfg.BoolOpt('use_unsafe_iscsi',
default=False,
help='Do not set this out of dev/test environments. '
- 'If a node does not have an fixed PXE IP address, '
+ 'If a node does not have a fixed PXE IP address, '
'volumes are exported with globally opened ACL'),
cfg.StrOpt('iscsi_iqn_prefix',
default='iqn.2010-10.org.openstack.baremetal',
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 26fb86f1e..d080f6d36 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -96,9 +96,13 @@ def mkfs(os_type, fs_label, target):
utils.execute(*mkfs_command.split())
-def resize2fs(image, check_exit_code=False):
- utils.execute('e2fsck', '-fp', image, check_exit_code=check_exit_code)
- utils.execute('resize2fs', image, check_exit_code=check_exit_code)
+def resize2fs(image, check_exit_code=False, run_as_root=False):
+ utils.execute('e2fsck', '-fp', image,
+ check_exit_code=check_exit_code,
+ run_as_root=run_as_root)
+ utils.execute('resize2fs', image,
+ check_exit_code=check_exit_code,
+ run_as_root=run_as_root)
def get_disk_size(path):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index aa0439e74..747b60714 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -49,6 +49,17 @@ CONF.register_opts(driver_opts)
LOG = logging.getLogger(__name__)
+def driver_dict_from_config(named_driver_config, *args, **kwargs):
+ driver_registry = dict()
+
+ for driver_str in named_driver_config:
+ driver_type, _sep, driver = driver_str.partition('=')
+ driver_class = importutils.import_class(driver)
+ driver_registry[driver_type] = driver_class(*args, **kwargs)
+
+ return driver_registry
+
+
def block_device_info_get_root(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('root_device_name')
@@ -447,7 +458,8 @@ class ComputeDriver(object):
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info,
- block_migration=False):
+ block_migration=False,
+ block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 338d1dec1..04eeded72 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -166,6 +166,12 @@ class FakeDriver(driver.ComputeDriver):
block_device_info=None):
pass
+ def post_live_migration_at_destination(self, context, instance,
+ network_info,
+ block_migration=False,
+ block_device_info=None):
+ pass
+
def power_off(self, instance):
pass
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index bbc6034bd..ad38cd9a4 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -146,7 +146,7 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables = linux_net.iptables_manager
self.instances = {}
self.network_infos = {}
- self.basicly_filtered = False
+ self.basically_filtered = False
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 799ef7172..9316b2598 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -164,7 +164,7 @@ class HyperVDriver(driver.ComputeDriver):
block_device_info, network_info)
def post_live_migration_at_destination(self, ctxt, instance_ref,
- network_info, block_migration):
+ network_info, block_migration, block_device_info=None):
self._livemigrationops.post_live_migration_at_destination(ctxt,
instance_ref, network_info, block_migration)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 200236233..b69cf7bf1 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -37,7 +37,7 @@ hyper_volumeops_opts = [
help='The number of times we retry on attaching volume '),
cfg.IntOpt('hyperv_wait_between_attach_retry',
default=5,
- help='The seconds to wait between an volume attachment attempt'),
+ help='The seconds to wait between a volume attachment attempt'),
cfg.BoolOpt('force_volumeutils_v1',
default=False,
help='Force volumeutils v1'),
@@ -183,7 +183,7 @@ class VolumeOps(baseops.BaseOps):
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
AND Parent = '" + scsi_controller.path_() + "'")
- #Slots starts from 0, so the lenght of the disks gives us the free slot
+ #Slots starts from 0, so the length of the disks gives us the free slot
return len(volumes)
def detach_volume(self, connection_info, instance_name, mountpoint):
diff --git a/nova/virt/images.py b/nova/virt/images.py
index f80c19999..018badecf 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -123,7 +123,7 @@ class QemuImgInfo(object):
if len(line_pieces) != 6:
break
else:
- # Check against this pattern occuring in the final position
+ # Check against this pattern in the final position
# "%02d:%02d:%02d.%03d"
date_pieces = line_pieces[5].split(":")
if len(date_pieces) != 3:
@@ -175,7 +175,7 @@ class QemuImgInfo(object):
def qemu_img_info(path):
- """Return a object containing the parsed output from qemu-img info."""
+ """Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path):
return QemuImgInfo()
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index a10dc6f2f..e4da5cbde 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -277,17 +277,17 @@ class LibvirtDriver(driver.ComputeDriver):
self._host_state = None
self._initiator = None
self._wrapped_conn = None
+ self._caps = None
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver)
- self.volume_drivers = {}
- for driver_str in CONF.libvirt_volume_drivers:
- driver_type, _sep, driver = driver_str.partition('=')
- driver_class = importutils.import_class(driver)
- self.volume_drivers[driver_type] = driver_class(self)
+
+ self.volume_drivers = driver.driver_dict_from_config(
+ CONF.libvirt_volume_drivers, self)
+
self._host_state = None
disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
@@ -362,7 +362,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _test_connection(self):
try:
- self._wrapped_conn.getCapabilities()
+ self._wrapped_conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
@@ -426,10 +426,10 @@ class LibvirtDriver(driver.ComputeDriver):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
- def instance_exists(self, instance_id):
+ def instance_exists(self, instance_name):
"""Efficient override of base instance_exists method."""
try:
- self._lookup_by_name(instance_id)
+ self._lookup_by_name(instance_name)
return True
except exception.NovaException:
return False
@@ -706,7 +706,8 @@ class LibvirtDriver(driver.ComputeDriver):
if child.get('dev') == device:
return etree.tostring(node)
- def _get_domain_xml(self, instance, network_info, block_device_info=None):
+ def _get_existing_domain_xml(self, instance, network_info,
+ block_device_info=None):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
@@ -807,7 +808,7 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt_type != 'lxc':
- if state == power_state.RUNNING:
+ if state == power_state.RUNNING or state == power_state.PAUSED:
virt_dom.managedSave(0)
# Make the snapshot
@@ -831,6 +832,9 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.libvirt_type != 'lxc':
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
+ elif state == power_state.PAUSED:
+ self._create_domain(domain=virt_dom,
+ launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
# Upload that image to the image service
@@ -854,8 +858,7 @@ class LibvirtDriver(driver.ComputeDriver):
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
- return self._hard_reboot(instance, network_info,
- block_device_info=block_device_info)
+ return self._hard_reboot(instance, network_info, block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
@@ -894,8 +897,7 @@ class LibvirtDriver(driver.ComputeDriver):
greenthread.sleep(1)
return False
- def _hard_reboot(self, instance, network_info, xml=None,
- block_device_info=None):
+ def _hard_reboot(self, instance, network_info, block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
@@ -908,11 +910,10 @@ class LibvirtDriver(driver.ComputeDriver):
existing domain.
"""
- if not xml:
- xml = self._get_domain_xml(instance, network_info,
- block_device_info)
-
self._destroy(instance)
+ xml = self.to_xml(instance, network_info,
+ block_device_info=block_device_info,
+ write_to_disk=True)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
@@ -957,17 +958,37 @@ class LibvirtDriver(driver.ComputeDriver):
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
- xml = self._get_domain_xml(instance, network_info, block_device_info)
+ xml = self._get_existing_domain_xml(instance, network_info,
+ block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
- xml = self._get_domain_xml(instance, network_info, block_device_info)
+ xml = self._get_existing_domain_xml(instance, network_info,
+ block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
+ # Check if the instance is running already and avoid doing
+ # anything if it is.
+ if self.instance_exists(instance['name']):
+ domain = self._lookup_by_name(instance['name'])
+ state = LIBVIRT_POWER_STATE[domain.info()[0]]
+
+ ignored_states = (power_state.RUNNING,
+ power_state.SUSPENDED,
+ power_state.PAUSED)
+
+ if state in ignored_states:
+ return
+
+ # Instance is not up and could be in an unknown state.
+ # Be as absolute as possible about getting it back into
+ # a known and running state.
+ self._hard_reboot(instance, network_info, block_device_info)
+
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
@@ -979,7 +1000,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
instance_dir = libvirt_utils.get_instance_path(instance)
- unrescue_xml = self._get_domain_xml(instance, network_info)
+ unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
@@ -1443,11 +1464,11 @@ class LibvirtDriver(driver.ComputeDriver):
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host"""
- xmlstr = self._conn.getCapabilities()
-
- caps = vconfig.LibvirtConfigCaps()
- caps.parse_str(xmlstr)
- return caps
+ if not self._caps:
+ xmlstr = self._conn.getCapabilities()
+ self._caps = vconfig.LibvirtConfigCaps()
+ self._caps.parse_str(xmlstr)
+ return self._caps
def get_host_uuid(self):
"""Returns a UUID representing the host."""
@@ -1645,7 +1666,7 @@ class LibvirtDriver(driver.ComputeDriver):
diskconfig.driver_format = "raw"
diskconfig.driver_cache = self.disk_cachemode
diskconfig.source_path = os.path.join(
- libvirt_utils.get_instance_path(instane), "disk.config")
+ libvirt_utils.get_instance_path(instance), "disk.config")
diskconfig.target_dev = self.default_last_device
diskconfig.target_bus = default_disk_bus
devices.append(diskconfig)
@@ -1854,11 +1875,18 @@ class LibvirtDriver(driver.ComputeDriver):
return guest
def to_xml(self, instance, network_info, image_meta=None, rescue=None,
- block_device_info=None):
+ block_device_info=None, write_to_disk=False):
LOG.debug(_('Starting toXML method'), instance=instance)
conf = self.get_guest_config(instance, network_info, image_meta,
rescue, block_device_info)
xml = conf.to_xml()
+
+ if write_to_disk:
+ instance_dir = os.path.join(CONF.instances_path,
+ instance["name"])
+ xml_path = os.path.join(instance_dir, 'libvirt.xml')
+ libvirt_utils.write_to_file(xml_path, xml)
+
LOG.debug(_('Finished toXML method'), instance=instance)
return xml
@@ -1994,7 +2022,7 @@ class LibvirtDriver(driver.ComputeDriver):
def get_interfaces(self, xml):
"""
- Note that this function takes an domain xml.
+ Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
@@ -2760,28 +2788,24 @@ class LibvirtDriver(driver.ComputeDriver):
def post_live_migration_at_destination(self, ctxt,
instance_ref,
network_info,
- block_migration):
+ block_migration,
+ block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
- :param network_info: instance network infomation
+ :param network_info: instance network information
:param block_migration: if true, post operation of block_migraiton.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
- instance_dir = libvirt_utils.get_instance_path(instance_ref)
- xml_path = os.path.join(instance_dir, 'libvirt.xml')
# In case of block migration, destination does not have
# libvirt.xml
- if not os.path.isfile(xml_path):
- xml = self.to_xml(instance_ref, network_info=network_info)
- f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
- f.write(xml)
- f.close()
+ self.to_xml(instance_ref, network_info, block_device_info,
+ write_to_disk=True)
# libvirt.xml should be made by to_xml(), but libvirt
# does not accept to_xml() result, since uuid is not
# included in to_xml() result.
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index c47056ff2..3323b8f1d 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -228,11 +228,11 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
def setup_basic_filtering(self, instance, network_info):
"""Set up provider rules and basic NWFilter."""
self.nwfilter.setup_basic_filtering(instance, network_info)
- if not self.basicly_filtered:
+ if not self.basically_filtered:
LOG.debug(_('iptables firewall: Setup Basic Filtering'),
instance=instance)
self.refresh_provider_fw_rules()
- self.basicly_filtered = True
+ self.basically_filtered = True
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index d272e408c..0815c142f 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -228,7 +228,7 @@ class Lvm(Image):
cmd = ('dd', 'if=%s' % base, 'of=%s' % self.path, 'bs=4M')
utils.execute(*cmd, run_as_root=True)
if resize:
- disk.resize2fs(self.path)
+ disk.resize2fs(self.path, run_as_root=True)
generated = 'ephemeral_size' in kwargs
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 50fac9bb4..8f677b482 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -77,7 +77,7 @@ CONF.import_opt('instances_path', 'nova.compute.manager')
def get_info_filename(base_path):
- """Construct a filename for storing addtional information about a base
+ """Construct a filename for storing additional information about a base
image.
Returns a filename.
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index b25a96159..5a4a2938b 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -55,7 +55,7 @@ def get_powervm_disk_adapter():
class PowerVMOperator(object):
"""PowerVM main operator.
- The PowerVMOperator is intented to wrapper all operations
+ The PowerVMOperator is intended to wrap all operations
from the driver and handle either IVM or HMC managed systems.
"""
diff --git a/nova/virt/vmwareapi/network_util.py b/nova/virt/vmwareapi/network_util.py
index a3b20137d..d2bdad0c1 100644
--- a/nova/virt/vmwareapi/network_util.py
+++ b/nova/virt/vmwareapi/network_util.py
@@ -38,7 +38,7 @@ def get_network_with_the_name(session, network_name="vmnet0"):
vm_networks_ret = hostsystems[0].propSet[0].val
# Meaning there are no networks on the host. suds responds with a ""
# in the parent property field rather than a [] in the
- # ManagedObjectRefernce property field of the parent
+ # ManagedObjectReference property field of the parent
if not vm_networks_ret:
return None
vm_networks = vm_networks_ret.ManagedObjectReference
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 61cfa9631..ef08edbc1 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -21,6 +21,9 @@ import os
import time
import uuid
+from nova.api.metadata import password
+from nova import context
+from nova import crypto
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -207,6 +210,12 @@ class XenAPIBasedAgent(object):
LOG.error(msg, instance=self.instance)
raise Exception(msg)
+ sshkey = self.instance.get('key_data')
+ if sshkey:
+ enc = crypto.ssh_encrypt_text(sshkey, new_pass)
+ password.set_password(context.get_admin_context(),
+ self.instance['uuid'], base64.b64encode(enc))
+
return resp['message']
def inject_file(self, path, contents):
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 0acc360e8..a894e95b9 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -499,14 +499,15 @@ class XenAPIDriver(driver.ComputeDriver):
pass
def post_live_migration_at_destination(self, ctxt, instance_ref,
- network_info, block_migration):
+ network_info, block_migration,
+ block_device_info=None):
"""Post operation of live migration at destination host.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
- :params network_info: instance network infomation
+ :params network_info: instance network information
:params : block_migration: if true, post operation of block_migraiton.
"""
# TODO(JohnGarbutt) look at moving/downloading ramdisk and kernel
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index 5bf326117..138f84831 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -19,10 +19,10 @@
A pool may be 'created', in which case the admin has triggered its
creation, but the underlying hypervisor pool has not actually being set up
-yet. An pool may be 'changing', meaning that the underlying hypervisor
-pool is being setup. An pool may be 'active', in which case the underlying
-hypervisor pool is up and running. An pool may be 'dismissed' when it has
-no hosts and it has been deleted. An pool may be in 'error' in all other
+yet. A pool may be 'changing', meaning that the underlying hypervisor
+pool is being setup. A pool may be 'active', in which case the underlying
+hypervisor pool is up and running. A pool may be 'dismissed' when it has
+no hosts and it has been deleted. A pool may be in 'error' in all other
cases.
A 'created' pool becomes 'changing' during the first request of
adding a host. During a 'changing' status no other requests will be accepted;
@@ -34,7 +34,7 @@ All other operations (e.g. add/remove hosts) that succeed will keep the
pool in the 'active' state. If a number of continuous requests fail,
an 'active' pool goes into an 'error' state. To recover from such a state,
admin intervention is required. Currently an error state is irreversible,
-that is, in order to recover from it an pool must be deleted.
+that is, in order to recover from it a pool must be deleted.
"""
CREATED = 'created'
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index debba4f02..52a5f37b2 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -1510,7 +1510,7 @@ def fetch_bandwidth(session):
def compile_metrics(start_time, stop_time=None):
"""Compile bandwidth usage, cpu, and disk metrics for all VMs on
this host.
- Note that some stats, like bandwith, do not seem to be very
+ Note that some stats, like bandwidth, do not seem to be very
accurate in some of the data from XenServer (mdragon). """
start_time = int(start_time)
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index fccdedac8..3e1ccc66b 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -48,6 +48,9 @@ cinder_opts = [
cfg.IntOpt('cinder_http_retries',
default=3,
help='Number of cinderclient retries on failed http calls'),
+ cfg.BoolOpt('cinder_api_insecure',
+ default=False,
+ help='Allow to perform insecure SSL requests to cinder'),
]
CONF = cfg.CONF
@@ -88,6 +91,7 @@ def cinderclient(context):
context.auth_token,
project_id=context.project_id,
auth_url=url,
+ insecure=CONF.cinder_api_insecure,
retries=CONF.cinder_http_retries)
# noauth extracts user_id:project_id from auth_token
c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
diff --git a/openstack-common.conf b/openstack-common.conf
index ea33ab235..a0b14e651 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,setup,timeutils,rpc,uuidutils
+modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils
# The base module to hold the copy of openstack.common
base=nova
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
index 35316a9b8..b9e9da2e2 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
@@ -16,7 +16,7 @@
# under the License.
"""
-XenAPI Plugin for transfering data between host nodes
+XenAPI Plugin for transferring data between host nodes
"""
import utils
diff --git a/run_tests.sh b/run_tests.sh
index 3a579ca36..238f5e194 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -81,15 +81,19 @@ function run_tests {
if [ $coverage -eq 1 ]; then
# Do not test test_coverage_ext when gathering coverage.
if [ "x$testrargs" = "x" ]; then
- testrargs="^(?!.*test_coverage_ext).*$"
+ testrargs="^(?!.*test.*coverage).*$"
fi
- export PYTHON="${wrapper} coverage run --source nova --parallel-mode"
+ TESTRTESTS="$TESTRTESTS --coverage"
+ else
+ TESTRTESTS="$TESTRTESTS --slowest"
fi
+
# Just run the test suites in current environment
set +e
- TESTRTESTS="$TESTRTESTS $testrargs"
+ testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
+ TESTRTESTS="$TESTRTESTS --testr-args='$testrargs'"
echo "Running \`${wrapper} $TESTRTESTS\`"
- ${wrapper} $TESTRTESTS
+ bash -c "${wrapper} $TESTRTESTS"
RESULT=$?
set -e
@@ -121,7 +125,7 @@ function run_pep8 {
srcfiles+=" setup.py"
# Until all these issues get fixed, ignore.
- ignore='--ignore=E12,E711,E721,E712'
+ ignore='--ignore=E12,E711,E721,E712,N403,N404'
# First run the hacking selftest, to make sure it's right
echo "Running hacking.py self test"
@@ -143,7 +147,7 @@ function run_pep8 {
}
-TESTRTESTS="testr run --parallel $testropts"
+TESTRTESTS="python setup.py testr $testropts"
if [ $never_venv -eq 0 ]
then
diff --git a/tools/conf/extract_opts.py b/tools/conf/extract_opts.py
index 3185cb93d..4dde53335 100644
--- a/tools/conf/extract_opts.py
+++ b/tools/conf/extract_opts.py
@@ -2,7 +2,6 @@
# Copyright 2012 SINA Corporation
# All Rights Reserved.
-# Author: Zhongyue Luo <lzyeval@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -15,6 +14,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
+# @author: Zhongyue Luo, SINA Corporation.
+#
"""Extracts OpenStack config option info from module(s)."""
@@ -35,6 +37,15 @@ FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"
+OPT_TYPES = {
+ STROPT: 'string value',
+ BOOLOPT: 'boolean value',
+ INTOPT: 'integer value',
+ FLOATOPT: 'floating point value',
+ LISTOPT: 'list value',
+ MULTISTROPT: 'multi valued',
+}
+
OPTION_COUNT = 0
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
@@ -63,10 +74,6 @@ def main(srcfiles):
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
- opts_by_group['DEFAULT'].append(
- (cfg.__name__ + ':' + cfg.CommonConfigOpts.__name__,
- _list_opts(cfg.CommonConfigOpts)[0][1]))
-
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
@@ -187,33 +194,19 @@ def _get_my_ip():
return None
-MY_IP = _get_my_ip()
-HOST = socket.getfqdn()
-
-
def _sanitize_default(s):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if s.startswith(BASEDIR):
return s.replace(BASEDIR, '/usr/lib/python/site-packages')
- elif s == MY_IP:
+ elif s == _get_my_ip():
return '10.0.0.1'
- elif s == HOST:
+ elif s == socket.getfqdn():
return 'nova'
elif s.strip() != s:
return '"%s"' % s
return s
-OPT_TYPES = {
- 'StrOpt': 'string value',
- 'BoolOpt': 'boolean value',
- 'IntOpt': 'integer value',
- 'FloatOpt': 'floating point value',
- 'ListOpt': 'list value',
- 'MultiStrOpt': 'multi valued',
-}
-
-
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
diff --git a/tools/hacking.py b/tools/hacking.py
index ed22956eb..56f6694bd 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -21,7 +21,6 @@
built on top of pep8.py
"""
-import fnmatch
import inspect
import logging
import os
@@ -46,16 +45,15 @@ logging.disable('LOG')
#N8xx git commit messages
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
-DOCSTRING_TRIPLE = ['"""', "'''"]
+START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"]
+END_DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
# Monkey patch broken excluded filter in pep8
# See https://github.com/jcrocholl/pep8/pull/111
def excluded(self, filename):
- """
- Check if options.exclude contains a pattern that matches filename.
- """
+ """Check if options.exclude contains a pattern that matches filename."""
basename = os.path.basename(filename)
return any((pep8.filename_match(filename, self.options.exclude,
default=False),
@@ -120,7 +118,7 @@ def nova_todo_format(physical_line):
pos2 = physical_line.find('#') # make sure it's a comment
# TODO(sdague): should be smarter on this test
this_test = physical_line.find('N101: #TODO fail')
- if (pos != pos1 and pos2 >= 0 and pos2 < pos and this_test == -1):
+ if pos != pos1 and pos2 >= 0 and pos2 < pos and this_test == -1:
return pos, "N101: Use TODO(NAME)"
@@ -187,7 +185,8 @@ def nova_import_module_only(logical_line):
# TODO(sdague) actually get these tests working
def importModuleCheck(mod, parent=None, added=False):
- """
+ """Import Module helper function.
+
If can't find module on first try, recursively check for relative
imports
"""
@@ -258,8 +257,7 @@ def nova_import_module_only(logical_line):
def nova_import_alphabetical(logical_line, blank_lines, previous_logical,
indent_level, previous_indent_level):
- r"""
- Check for imports in alphabetical order.
+ r"""Check for imports in alphabetical order.
nova HACKING guide recommendation for imports:
imports in human alphabetical order
@@ -294,6 +292,11 @@ def nova_import_no_db_in_virt(logical_line, filename):
yield (0, "N307: nova.db import not allowed in nova/virt/*")
+def in_docstring_position(previous_logical):
+ return (previous_logical.startswith("def ") or
+ previous_logical.startswith("class "))
+
+
def nova_docstring_start_space(physical_line, previous_logical):
r"""Check for docstring not start with space.
@@ -311,11 +314,10 @@ def nova_docstring_start_space(physical_line, previous_logical):
# it's important that we determine this is actually a docstring,
# and not a doc block used somewhere after the first line of a
# function def
- if (previous_logical.startswith("def ") or
- previous_logical.startswith("class ")):
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])
- if (pos != -1 and len(physical_line) > pos + 4):
- if (physical_line[pos + 3] == ' '):
+ if in_docstring_position(previous_logical):
+ pos = max([physical_line.find(i) for i in START_DOCSTRING_TRIPLE])
+ if pos != -1 and len(physical_line) > pos + 4:
+ if physical_line[pos + 3] == ' ':
return (pos, "N401: docstring should not start with"
" a space")
@@ -330,33 +332,63 @@ def nova_docstring_one_line(physical_line):
N402: '''This is not'''
N402: '''Bad punctuation,'''
"""
+ #TODO(jogo) make this apply to multi line docstrings as well
line = physical_line.lstrip()
if line.startswith('"') or line.startswith("'"):
- pos = max([line.find(i) for i in DOCSTRING_TRIPLE]) # start
- end = max([line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
+ pos = max([line.find(i) for i in START_DOCSTRING_TRIPLE]) # start
+ end = max([line[-4:-1] == i for i in END_DOCSTRING_TRIPLE]) # end
- if (pos != -1 and end and len(line) > pos + 4):
- if (line[-5] not in ['.', '?', '!']):
+ if pos != -1 and end and len(line) > pos + 4:
+ if line[-5] not in ['.', '?', '!']:
return pos, "N402: one line docstring needs punctuation."
-def nova_docstring_multiline_end(physical_line):
+def nova_docstring_multiline_end(physical_line, previous_logical):
r"""Check multi line docstring end.
nova HACKING guide recommendation for docstring:
Docstring should end on a new line
- Okay: '''\nfoo\nbar\n'''
- # This test is not triggered, don't think it's right, removing
- # the colon prevents it from running
- N403 '''\nfoo\nbar\n ''' \n\n
+ Okay: '''foobar\nfoo\nbar\n'''
+ N403: def foo():\n'''foobar\nfoo\nbar\n d'''\n\n
"""
- # TODO(sdague) actually get these tests working
- pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
- if (pos != -1 and len(physical_line) == pos):
- if (physical_line[pos + 3] == ' '):
- return (pos, "N403: multi line docstring end on new line")
+ if in_docstring_position(previous_logical):
+ pos = max(physical_line.find(i) for i in END_DOCSTRING_TRIPLE)
+ if pos != -1 and len(physical_line) == pos + 4:
+ if physical_line.strip() not in START_DOCSTRING_TRIPLE:
+ return (pos, "N403: multi line docstring end on new line")
+
+
+def nova_docstring_multiline_start(physical_line, previous_logical, tokens):
+ r"""Check multi line docstring start with summary.
+
+ nova HACKING guide recommendation for docstring:
+        A multi line docstring should start with a one-line summary
+
+ Okay: '''foobar\nfoo\nbar\n'''
+ N404: def foo():\n'''\nfoo\nbar\n''' \n\n
+ """
+ if in_docstring_position(previous_logical):
+ pos = max([physical_line.find(i) for i in START_DOCSTRING_TRIPLE])
+ # start of docstring when len(tokens)==0
+ if len(tokens) == 0 and pos != -1 and len(physical_line) == pos + 4:
+ if physical_line.strip() in START_DOCSTRING_TRIPLE:
+ return (pos, "N404: multi line docstring "
+ "should start with a summary")
+
+
+def nova_no_cr(physical_line):
+    r"""Check that we only use newlines not carriage returns.
+
+ Okay: import os\nimport sys
+ # pep8 doesn't yet replace \r in strings, will work on an
+ # upstream fix
+ N901 import os\r\nimport sys
+ """
+ pos = physical_line.find('\r')
+ if pos != -1 and pos == (len(physical_line) - 2):
+ return (pos, "N901: Windows style line endings not allowed in code")
FORMAT_RE = re.compile("%(?:"
diff --git a/tools/test-requires b/tools/test-requires
index 5f195d5c1..bc279166e 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -12,4 +12,4 @@ pylint==0.25.2
python-subunit
sphinx>=1.1.2
testrepository>=0.0.13
-testtools>=0.9.22
+testtools>=0.9.26
diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py
index eeaf978b8..27b89d510 100755
--- a/tools/xenserver/vm_vdi_cleaner.py
+++ b/tools/xenserver/vm_vdi_cleaner.py
@@ -42,6 +42,7 @@ cleaner_opts = [
]
CONF = cfg.CONF
CONF.register_opts(cleaner_opts)
+CONF.import_opt('verbose', 'nova.openstack.common.log')
CONF.import_opt("resize_confirm_window", "nova.compute.manager")
diff --git a/tox.ini b/tox.ini
index e3322e044..e98f30151 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,9 +18,9 @@ downloadcache = ~/cache/pip
deps=pep8==1.3.3
commands =
python tools/hacking.py --doctest
- python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \
+ python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
--exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
- python tools/hacking.py --ignore=E12,E711,E721,E712 --repeat --show-source \
+ python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
--filename=nova* bin
[testenv:pylint]
@@ -38,7 +38,7 @@ commands = python tools/flakes.py nova
# tests conflict with coverage.
commands =
python setup.py testr --coverage \
- --testr-args='^(?!.*test_coverage_ext).*$'
+ --testr-args='^(?!.*test.*coverage).*$'
[testenv:venv]
commands = {posargs}