diff options
66 files changed, 2436 insertions, 963 deletions
diff --git a/HACKING.rst b/HACKING.rst index b29361a8d..c272bfc25 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -262,18 +262,18 @@ uses.) More information about testr can be found at: http://wiki.openstack.org/testr -openstack-common +oslo-incubator ---------------- -A number of modules from openstack-common are imported into the project. +A number of modules from oslo-incubator are imported into the project. -These modules are "incubating" in openstack-common and are kept in sync -with the help of openstack-common's update.py script. See: +These modules are "incubating" in oslo-incubator and are kept in sync +with the help of oslo's update.py script. See: - http://wiki.openstack.org/CommonLibrary#Incubation + https://wiki.openstack.org/wiki/Oslo#Incubation The copy of the code should never be directly modified here. Please -always update openstack-common first and then run the script to copy +always update oslo-incubator first and then run the script to copy the changes across. OpenStack Trademark diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample index 29778a556..7524a7e5e 100644 --- a/etc/nova/nova.conf.sample +++ b/etc/nova/nova.conf.sample @@ -52,15 +52,6 @@ # -# Options defined in nova.manager -# - -# Some periodic tasks can be run in a separate process. Should -# we run them here? (boolean value) -#run_external_periodic_tasks=true - - -# # Options defined in nova.netconf # @@ -82,6 +73,13 @@ # # If set, send compute.instance.update notifications on +# instance state changes. Valid values are None for no +# notifications, "vm_state" for notifications on VM state +# changes, or "vm_and_task_state" for notifications on VM and +# task state changes. (string value) +#notify_on_state_change=<None> + +# If set, send compute.instance.update notifications on # instance state changes. Valid values are False for no # notifications, True for notifications on any instance # changes. (boolean value) @@ -91,13 +89,6 @@ # the API service. 
(boolean value) #notify_api_faults=false -# If set, send compute.instance.update notifications on -# instance state changes. Valid values are None for no -# notifications, "vm_state" for notifications on VM state -# changes, or "vm_and_task_state" for notifications on VM and -# task state changes. (string value) -#notify_on_state_change=<None> - # # Options defined in nova.paths @@ -143,6 +134,10 @@ # number of floating ips allowed per project (integer value) #quota_floating_ips=10 +# number of fixed ips allowed per project (this should be at +# least the number of instances allowed) (integer value) +#quota_fixed_ips=-1 + # number of metadata items allowed per instance (integer # value) #quota_metadata_items=128 @@ -276,9 +271,6 @@ # Length of generated instance admin passwords (integer value) #password_length=12 -# Whether to disable inter-process locks (boolean value) -#disable_process_locking=false - # time period to generate instance usages for. Time period # must be hour, day, month or year (string value) #instance_usage_audit_period=month @@ -541,19 +533,73 @@ # -# Options defined in nova.common.memorycache +# Options defined in nova.cmd.clear_rabbit_queues # -# Memcached servers or None for in process cache. (list value) -#memcached_servers=<None> +# Queues to delete (multi valued) +#queues= + +# delete nova exchange too. (boolean value) +#delete_exchange=false # -# Options defined in nova.compute +# Options defined in nova.cmd.novnc +# + +# Record sessions to FILE.[session_number] (boolean value) +#record=false + +# Become a daemon (background process) (boolean value) +#daemon=false + +# Disallow non-encrypted connections (boolean value) +#ssl_only=false + +# Source is ipv6 (boolean value) +#source_is_ipv6=false + +# SSL certificate file (string value) +#cert=self.pem + +# SSL key file (if separate from cert) (string value) +#key=<None> + +# Run webserver on same port. Serve files from DIR. 
(string +# value) +#web=/usr/share/spice-html5 + + +# +# Options defined in nova.cmd.novncproxy # -# The full class name of the compute API class to use (string +# Host on which to listen for incoming requests (string value) +#novncproxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer # value) +#novncproxy_port=6080 + + +# +# Options defined in nova.cmd.spicehtml5proxy +# + +# Host on which to listen for incoming requests (string value) +#spicehtml5proxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer +# value) +#spicehtml5proxy_port=6082 + + +# +# Options defined in nova.compute +# + +# The full class name of the compute API class to use +# (deprecated) (string value) #compute_api_class=nova.compute.api.API @@ -565,6 +611,10 @@ # when testing in single-host environments. (boolean value) #allow_resize_to_same_host=false +# Allow migrate machine to the same host. Useful when testing +# in single-host environments. (boolean value) +#allow_migrate_to_same_host=false + # availability zone to use when user doesn't specify one # (string value) #default_schedule_zone=<None> @@ -588,7 +638,7 @@ # -# Options defined in nova.compute.instance_types +# Options defined in nova.compute.flavors # # default instance type to use, testing only (string value) @@ -630,6 +680,10 @@ # interval to pull bandwidth usage info (integer value) #bandwidth_poll_interval=600 +# interval to sync power states between the database and the +# hypervisor (integer value) +#sync_power_state_interval=600 + # Number of seconds between instance info_cache self healing # updates (integer value) #heal_instance_info_cache_interval=60 @@ -1046,6 +1100,10 @@ # be on the bottom. (string value) #iptables_bottom_regex= +# The table that iptables to jump to when a packet is to be +# dropped. 
(string value) +#iptables_drop_action=DROP + # # Options defined in nova.network.manager @@ -1083,7 +1141,9 @@ # Number of addresses in each private subnet (integer value) #network_size=256 -# Fixed IP address block (string value) +# DEPRECATED - Fixed IP address block.If set to an empty +# string, the subnet range(s) will be automatically determined +# and configured. (string value) #fixed_range=10.0.0.0/8 # Fixed IPv6 address block (string value) @@ -1210,50 +1270,6 @@ # The full class name of the security API class (string value) #security_group_api=nova -# -# Options defined in bin.nova-clear-rabbit-queues -# - -# Queues to delete (multi valued) -#queues= - -# delete nova exchange too. (boolean value) -#delete_exchange=false - - -# -# Options defined in bin.nova-novncproxy -# - -# Record sessions to FILE.[session_number] (boolean value) -#record=false - -# Become a daemon (background process) (boolean value) -#daemon=false - -# Disallow non-encrypted connections (boolean value) -#ssl_only=false - -# Source is ipv6 (boolean value) -#source_is_ipv6=false - -# SSL certificate file (string value) -#cert=self.pem - -# SSL key file (if separate from cert) (string value) -#key=<None> - -# Run webserver on same port. Serve files from DIR. 
(string -# value) -#web=/usr/share/novnc - -# Host on which to listen for incoming requests (string value) -#novncproxy_host=0.0.0.0 - -# Port on which to listen for incoming requests (integer -# value) -#novncproxy_port=6080 - # # Options defined in nova.objectstore.s3server @@ -1287,7 +1303,7 @@ # The SQLAlchemy connection string used to connect to the # database (string value) -#sql_connection=sqlite:////nova/openstack/common/db/$sqlite_db +#sql_connection=sqlite:////common/db/$sqlite_db # the filename to use with sqlite (string value) #sqlite_db=nova.sqlite @@ -1363,13 +1379,9 @@ # Log output to standard error (boolean value) #use_stderr=true -# Default file mode used when creating log files (string -# value) -#logfile_mode=0644 - # format string to use for log messages with context (string # value) -#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s # format string to use for log messages without context # (string value) @@ -1408,20 +1420,22 @@ #log_config=<None> # A logging.Formatter log message format string which may use -# any of the available logging.LogRecord attributes. Default: -# %(default)s (string value) -#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s +# any of the available logging.LogRecord attributes. This +# option is deprecated. Please use +# logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format=<None> # Format string for %%(asctime)s in log records. Default: # %(default)s (string value) #log_date_format=%Y-%m-%d %H:%M:%S -# (Optional) Name of log file to output to. If not set, -# logging will go to stdout. (string value) +# (Optional) Name of log file to output to. If no default is +# set, logging will go to stdout. 
(string value) #log_file=<None> -# (Optional) The directory to keep log files in (will be -# prepended to --log-file) (string value) +# (Optional) The base directory used for relative --log-file +# paths (string value) #log_dir=<None> # Use syslog for logging. (boolean value) @@ -1432,6 +1446,14 @@ # +# Options defined in nova.openstack.common.memorycache +# + +# Memcached servers or None for in process cache. (list value) +#memcached_servers=<None> + + +# # Options defined in nova.openstack.common.notifier.api # @@ -1457,6 +1479,15 @@ # +# Options defined in nova.openstack.common.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should +# we run them here? (boolean value) +#run_external_periodic_tasks=true + + +# # Options defined in nova.openstack.common.rpc # @@ -1569,7 +1600,7 @@ # Qpid broker hostname (string value) #qpid_hostname=localhost -# Qpid broker port (string value) +# Qpid broker port (integer value) #qpid_port=5672 # Qpid HA cluster host:port pairs (list value) @@ -1624,7 +1655,7 @@ # Name of this node. Must be a valid hostname, FQDN, or IP # address. Must match "host" option, if running Nova. (string # value) -#rpc_zmq_host=sorcha +#rpc_zmq_host=nova # @@ -1634,6 +1665,12 @@ # Matchmaker ring file (JSON) (string value) #matchmaker_ringfile=/etc/nova/matchmaker_ring.json +# Heartbeat frequency (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. 
(integer value) +#matchmaker_heartbeat_ttl=600 + # # Options defined in nova.scheduler.driver @@ -1743,17 +1780,6 @@ # -# Options defined in nova.scheduler.multi -# - -# Driver to use for scheduling compute calls (string value) -#compute_scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler - -# Default driver to use for scheduling calls (string value) -#default_scheduler_driver=nova.scheduler.chance.ChanceScheduler - - -# # Options defined in nova.scheduler.rpcapi # @@ -1771,24 +1797,6 @@ # -# Options defined in nova.scheduler.weights.least_cost -# - -# Which cost functions the LeastCostScheduler should use (list -# value) -#least_cost_functions=<None> - -# How much weight to give the noop cost function (floating -# point value) -#noop_cost_fn_weight=1.0 - -# How much weight to give the fill-first cost function. A -# negative value will reverse behavior: e.g. spread-first -# (floating point value) -#compute_fill_first_cost_fn_weight=<None> - - -# # Options defined in nova.scheduler.weights.ram # @@ -1856,7 +1864,8 @@ # Driver to use for controlling virtualization. 
Options # include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, # fake.FakeDriver, baremetal.BareMetalDriver, -# vmwareapi.VMWareESXDriver (string value) +# vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver (string +# value) #compute_driver=<None> # The default format an ephemeral_volume will be formatted @@ -1886,52 +1895,6 @@ # -# Options defined in nova.virt.hyperv.vif -# - -# External virtual switch Name, if not provided, the first -# external virtual switch is used (string value) -#vswitch_name=<None> - - -# -# Options defined in nova.virt.hyperv.vmops -# - -# Required for live migration among hosts with different CPU -# features (boolean value) -#limit_cpu_features=false - -# Sets the admin password in the config drive image (boolean -# value) -#config_drive_inject_password=false - -# qemu-img is used to convert between different image types -# (string value) -#qemu_img_cmd=qemu-img.exe - -# Attaches the Config Drive image as a cdrom drive instead of -# a disk drive (boolean value) -#config_drive_cdrom=false - - -# -# Options defined in nova.virt.hyperv.volumeops -# - -# The number of times we retry on attaching volume (integer -# value) -#hyperv_attaching_volume_retry_count=10 - -# The seconds to wait between an volume attachment attempt -# (integer value) -#hyperv_wait_between_attach_retry=5 - -# Force volumeutils v1 (boolean value) -#force_volumeutils_v1=false - - -# # Options defined in nova.virt.images # @@ -2040,6 +2003,10 @@ # ["file=directsync","block=none"] (list value) #disk_cachemodes= +# Which pcpus can be used by vcpus of instance e.g: +# "4-12,^8,15" (string value) +#vcpu_pin_set=<None> + # # Options defined in nova.virt.libvirt.imagebackend @@ -2252,7 +2219,8 @@ # # Optional VIM Service WSDL Location e.g -# http://<server>/vimService.wsdl +# http://<server>/vimService.wsdl. 
Optional over-ride to +# default location for bug work-arounds (string value) #vmwareapi_wsdl_loc=<None> @@ -2278,11 +2246,18 @@ # (string value) #xenapi_agent_path=usr/sbin/xe-update-networking -# Disable XenAPI agent. Reduces the amount of time it takes -# nova to detect that a VM has started, when that VM does not -# have the agent installed (boolean value) +# Disables the use of the XenAPI agent in any image regardless +# of what image properties are present. (boolean value) #xenapi_disable_agent=false +# Determines if the xenapi agent should be used when the image +# used does not contain a hint to declare if the agent is +# present or not. The hint is a glance property +# "xenapi_use_agent" that has the value "true" or "false". +# Note that waiting for the agent when it is not present will +# significantly increase server boot times. (boolean value) +#xenapi_use_agent_default=false + # # Options defined in nova.virt.xenapi.driver @@ -2513,8 +2488,8 @@ # region name of this node (string value) #os_region_name=<None> -# Allow for a ca certificates file to be specified for cinder -# client requests (string value) +# Location of ca certicates file to use for cinder client +# requests. 
(string value) #cinder_ca_certificates_file=<None> # Number of cinderclient retries on failed http calls (integer @@ -2530,7 +2505,7 @@ #cinder_cross_az_attach=true -[HYPERV] +[hyperv] # # Options defined in nova.virt.hyperv.pathutils @@ -2544,6 +2519,71 @@ #instances_path_share= +# +# Options defined in nova.virt.hyperv.vif +# + +# External virtual switch Name, if not provided, the first +# external virtual switch is used (string value) +#vswitch_name=<None> + + +# +# Options defined in nova.virt.hyperv.vmops +# + +# Required for live migration among hosts with different CPU +# features (boolean value) +#limit_cpu_features=false + +# Sets the admin password in the config drive image (boolean +# value) +#config_drive_inject_password=false + +# qemu-img is used to convert between different image types +# (string value) +#qemu_img_cmd=qemu-img.exe + +# Attaches the Config Drive image as a cdrom drive instead of +# a disk drive (boolean value) +#config_drive_cdrom=false + + +# +# Options defined in nova.virt.hyperv.volumeops +# + +# The number of times to retry to attach a volume (integer +# value) +#volume_attach_retry_count=10 + +# Interval between volume attachment attempts, in seconds +# (integer value) +#volume_attach_retry_interval=5 + +# Force volumeutils v1 (boolean value) +#force_volumeutils_v1=false + + +[osapi_v3] + +# +# Options defined in nova.api.openstack +# + +# Whether the V3 API is enabled or not (boolean value) +#enabled=false + +# A list of v3 API extensions to never load. Specify the +# extension aliases here. (list value) +#extensions_blacklist= + +# If the list is not empty then a v3 API extension will only +# be loaded if it exists in this list. Specify the extension +# aliases here. (list value) +#extensions_whitelist= + + [conductor] # @@ -2613,6 +2653,13 @@ # value) #call_timeout=60 +# Percentage of cell capacity to hold in reserve. 
Affects both +# memory and disk utilization (floating point value) +#reserve_percent=10.0 + +# Type of cell: api or compute (string value) +#cell_type=<None> + # # Options defined in nova.cells.rpc_driver @@ -2628,6 +2675,16 @@ # Options defined in nova.cells.scheduler # +# Filter classes the cells scheduler should use. An entry of +# "nova.cells.filters.all_filters"maps to all cells filters +# included with nova. (list value) +#scheduler_filter_classes=nova.cells.filters.all_filters + +# Weigher classes the cells scheduler should use. An entry of +# "nova.cells.weights.all_weighers"maps to all cell weighers +# included with nova. (list value) +#scheduler_weight_classes=nova.cells.weights.all_weighers + # How many retries when no cells are available. (integer # value) #scheduler_retries=10 @@ -2646,6 +2703,33 @@ #db_check_interval=60 +# +# Options defined in nova.cells.weights.mute_child +# + +# Multiplier used to weigh mute children. (The value should +# be negative.) (floating point value) +#mute_weight_multiplier=-10.0 + +# Weight value assigned to mute children. (The value should +# be positive.) (floating point value) +#mute_weight_value=1000.0 + +# Number of seconds after which a lack of capability and +# capacity updates signals the child cell is to be treated as +# a mute. (integer value) +#mute_child_interval=300 + + +# +# Options defined in nova.cells.weights.ram_by_instance_type +# + +# Multiplier used for weighing ram. Negative numbers mean to +# stack vs spread. 
(floating point value) +#ram_weight_multiplier=10.0 + + [zookeeper] # @@ -2767,15 +2851,42 @@ # +# Options defined in nova.virt.baremetal.tilera_pdu +# + +# ip address of tilera pdu (string value) +#tile_pdu_ip=10.0.100.1 + +# management script for tilera pdu (string value) +#tile_pdu_mgr=/tftpboot/pdu_mgr + +# power status of tilera PDU is OFF (integer value) +#tile_pdu_off=2 + +# power status of tilera PDU is ON (integer value) +#tile_pdu_on=1 + +# power status of tilera PDU (integer value) +#tile_pdu_status=9 + +# wait time in seconds until check the result after tilera +# power operations (integer value) +#tile_power_wait=9 + + +# # Options defined in nova.virt.baremetal.virtual_power_driver # # ip or name to virtual power host (string value) #virtual_power_ssh_host= +# Port to use for ssh to virtual power host (integer value) +#virtual_power_ssh_port=22 + # base command to use for virtual power(vbox,virsh) (string # value) -#virtual_power_type=vbox +#virtual_power_type=virsh # user to execute virtual power commands as (string value) #virtual_power_host_user= @@ -2783,6 +2894,9 @@ # password for virtual power host_user (string value) #virtual_power_host_pass= +# ssh key for virtual power host_user (string value) +#virtual_power_host_key=<None> + # # Options defined in nova.virt.baremetal.volume_driver @@ -2808,6 +2922,22 @@ #topics=notifications +[matchmaker_redis] + +# +# Options defined in nova.openstack.common.rpc.matchmaker_redis +# + +# Host to locate redis (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server. (optional) (string value) +#password=<None> + + [trusted_computing] # @@ -2872,25 +3002,4 @@ #keymap=en-us -[osapi_v3] - -# -# Options defined in nova.api.openstack -# - -# Whether the V3 API is enabled or not -#enabled=False - -# If the list is not empty then a v3 API extension -# will only be loaded if it exists in this list. 
-# Specify the extension aliases here -#extensions_whitelist= - -# A list of v3 API extensions to never load. -# Specify the extension aliases here. -# Note that if an extension is in both the blacklist and -# and whitelist then it will not be loaded -#extensions_blacklist= - - -# Total option count: 584 +# Total option count: 609 diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index da0a52caa..8f4e20798 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -1058,8 +1058,9 @@ class CloudController(object): """Format InstanceBlockDeviceMappingResponseItemType.""" root_device_type = 'instance-store' mapping = [] - for bdm in db.block_device_mapping_get_all_by_instance(context, - instance_uuid): + for bdm in block_device.legacy_mapping( + db.block_device_mapping_get_all_by_instance(context, + instance_uuid)): volume_id = bdm['volume_id'] if (volume_id is None or bdm['no_device']): continue diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 3687ce111..3455b812d 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -28,6 +28,7 @@ import webob.exc from nova.api.openstack import extensions from nova.api.openstack import wsgi +from nova import exception from nova import notifications from nova.openstack.common import log as logging from nova import utils @@ -55,6 +56,11 @@ CONF = cfg.CONF CONF.register_group(api_opts_group) CONF.register_opts(api_opts, api_opts_group) +# List of v3 API extensions which are considered to form +# the core API and so must be present +# TODO(cyeoh): Expand this list as the core APIs are ported to V3 +API_V3_CORE_EXTENSIONS = set(['servers']) + class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" @@ -306,8 +312,22 @@ class APIRouterV3(base_wsgi.Router): mapper=mapper) self.api_extension_manager.map(self._register_controllers) + missing_core_extensions = self.get_missing_core_extensions( + 
self.loaded_extension_info.get_extensions().keys()) + if missing_core_extensions: + LOG.critical(_("Missing core API extensions: %s"), + missing_core_extensions) + raise exception.CoreAPIMissing( + missing_apis=missing_core_extensions) + super(APIRouterV3, self).__init__(mapper) + @staticmethod + def get_missing_core_extensions(extensions_loaded): + extensions_loaded = set(extensions_loaded) + missing_extensions = API_V3_CORE_EXTENSIONS - extensions_loaded + return missing_extensions + @property def loaded_extension_info(self): raise NotImplementedError() diff --git a/nova/api/openstack/compute/contrib/keypairs.py b/nova/api/openstack/compute/contrib/keypairs.py index a79b39aae..4245355e5 100644 --- a/nova/api/openstack/compute/contrib/keypairs.py +++ b/nova/api/openstack/compute/contrib/keypairs.py @@ -94,12 +94,10 @@ class KeypairController(object): raise webob.exc.HTTPRequestEntityTooLarge( explanation=msg, headers={'Retry-After': 0}) - except exception.InvalidKeypair: - msg = _("Keypair data is invalid") - raise webob.exc.HTTPBadRequest(explanation=msg) - except exception.KeyPairExists: - msg = _("Key pair '%s' already exists.") % name - raise webob.exc.HTTPConflict(explanation=msg) + except exception.InvalidKeypair as exc: + raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) + except exception.KeyPairExists as exc: + raise webob.exc.HTTPConflict(explanation=exc.format_message()) def delete(self, req, id): """ diff --git a/nova/api/openstack/compute/plugins/v3/keypairs.py b/nova/api/openstack/compute/plugins/v3/keypairs.py index 4051a3497..bf740641e 100644 --- a/nova/api/openstack/compute/plugins/v3/keypairs.py +++ b/nova/api/openstack/compute/plugins/v3/keypairs.py @@ -95,12 +95,10 @@ class KeypairController(object): raise webob.exc.HTTPRequestEntityTooLarge( explanation=msg, headers={'Retry-After': 0}) - except exception.InvalidKeypair: - msg = _("Keypair data is invalid") - raise webob.exc.HTTPBadRequest(explanation=msg) - except 
exception.KeyPairExists: - msg = _("Key pair '%s' already exists.") % name - raise webob.exc.HTTPConflict(explanation=msg) + except exception.InvalidKeypair as exc: + raise webob.exc.HTTPBadRequest(explanation=exc.format_message()) + except exception.KeyPairExists as exc: + raise webob.exc.HTTPConflict(explanation=exc.format_message()) def delete(self, req, id): """ diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py index a0bc52e3e..e4dc5db92 100644 --- a/nova/api/openstack/compute/plugins/v3/servers.py +++ b/nova/api/openstack/compute/plugins/v3/servers.py @@ -41,14 +41,9 @@ from nova.openstack.common import uuidutils from nova import utils -server_opts = [ - cfg.BoolOpt('enable_instance_password', - default=True, - help='Allows use of instance password during ' - 'server creation'), -] CONF = cfg.CONF -CONF.register_opts(server_opts) +CONF.import_opt('enable_instance_password', + 'nova.api.openstack.compute.servers') CONF.import_opt('network_api_class', 'nova.network') CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager') diff --git a/nova/block_device.py b/nova/block_device.py index b7a9881b1..746fd6bb4 100644 --- a/nova/block_device.py +++ b/nova/block_device.py @@ -17,9 +17,14 @@ import re +from oslo.config import cfg + +from nova import exception from nova.openstack.common import log as logging from nova.virt import driver +CONF = cfg.CONF +CONF.import_opt('default_ephemeral_format', 'nova.virt.driver') LOG = logging.getLogger(__name__) DEFAULT_ROOT_DEV_NAME = '/dev/sda1' @@ -29,6 +34,176 @@ _DEFAULT_MAPPINGS = {'ami': 'sda1', 'swap': 'sda3'} +bdm_legacy_fields = set(['device_name', 'delete_on_termination', + 'virtual_name', 'snapshot_id', + 'volume_id', 'volume_size', 'no_device', + 'connection_info']) + + +bdm_new_fields = set(['source_type', 'destination_type', + 'guest_format', 'device_type', 'disk_bus', 'boot_index', + 'device_name', 'delete_on_termination', 'snapshot_id', + 
'volume_id', 'volume_size', 'image_id', 'no_device', + 'connection_info']) + + +bdm_db_only_fields = set(['id', 'instance_uuid']) + + +bdm_db_inherited_fields = set(['created_at', 'updated_at', + 'deleted_at', 'deleted']) + + +class BlockDeviceDict(dict): + """Represents a Block Device Mapping in Nova.""" + + _fields = bdm_new_fields + _db_only_fields = (bdm_db_only_fields | + bdm_db_inherited_fields) + + def __init__(self, bdm_dict=None, do_not_default=None): + super(BlockDeviceDict, self).__init__() + + bdm_dict = bdm_dict or {} + do_not_default = do_not_default or set() + + self._validate(bdm_dict) + # NOTE (ndipanov): Never default db fields + self.update( + dict((field, None) + for field in self._fields - do_not_default)) + self.update(bdm_dict) + + def _validate(self, bdm_dict): + """Basic data format validations.""" + if (not set(key for key, _ in bdm_dict.iteritems()) <= + (self._fields | self._db_only_fields)): + raise exception.InvalidBDMFormat() + # TODO(ndipanov): Validate must-have fields! 
+ + @classmethod + def from_legacy(cls, legacy_bdm): + + copy_over_fields = bdm_legacy_fields & bdm_new_fields + copy_over_fields |= (bdm_db_only_fields | + bdm_db_inherited_fields) + # NOTE (ndipanov): These fields cannot be computed + # from legacy bdm, so do not default them + # to avoid overwriting meaningful values in the db + non_computable_fields = set(['boot_index', 'disk_bus', + 'guest_format', 'device_type']) + + new_bdm = dict((fld, val) for fld, val in legacy_bdm.iteritems() + if fld in copy_over_fields) + + virt_name = legacy_bdm.get('virtual_name') + volume_size = legacy_bdm.get('volume_size') + + if is_swap_or_ephemeral(virt_name): + new_bdm['source_type'] = 'blank' + new_bdm['delete_on_termination'] = True + new_bdm['destination_type'] = 'local' + + if virt_name == 'swap': + new_bdm['guest_format'] = 'swap' + else: + new_bdm['guest_format'] = CONF.default_ephemeral_format + + elif legacy_bdm.get('snapshot_id'): + new_bdm['source_type'] = 'snapshot' + new_bdm['destination_type'] = 'volume' + + elif legacy_bdm.get('volume_id'): + new_bdm['source_type'] = 'volume' + new_bdm['destination_type'] = 'volume' + + elif legacy_bdm.get('no_device'): + # NOTE (ndipanov): Just keep the BDM for now, + pass + + else: + raise exception.InvalidBDMFormat() + + return cls(new_bdm, non_computable_fields) + + def legacy(self): + copy_over_fields = bdm_legacy_fields - set(['virtual_name']) + copy_over_fields |= (bdm_db_only_fields | + bdm_db_inherited_fields) + + legacy_block_device = dict((field, self.get(field)) + for field in copy_over_fields if field in self) + + source_type = self.get('source_type') + no_device = self.get('no_device') + if source_type == 'blank': + if self['guest_format'] == 'swap': + legacy_block_device['virtual_name'] = 'swap' + else: + # NOTE (ndipanov): Always label as 0, it is up to + # the calling routine to re-enumerate them + legacy_block_device['virtual_name'] = 'ephemeral0' + elif source_type in ('volume', 'snapshot') or no_device: + 
legacy_block_device['virtual_name'] = None + elif source_type == 'image': + # NOTE(ndipanov): Image bdms have no meaning in + # the legacy format - raise + raise exception.InvalidBDMForLegacy() + + return legacy_block_device + + +def is_safe_for_update(block_device_dict): + """Determine if passed dict is a safe subset for update. + + Safe subset in this case means a safe subset of both legacy + and new versions of data, that can be passed to an UPDATE query + without any transformation. + """ + fields = set(block_device_dict.keys()) + return fields <= (bdm_new_fields | + bdm_db_inherited_fields | + bdm_db_only_fields) + + +def create_image_bdm(image_ref, boot_index=0): + """Create a block device dict based on the image_ref. + + This is useful in the API layer to keep the compatibility + with having an image_ref as a field in the instance requests + """ + return BlockDeviceDict( + {'source_type': 'image', + 'image_id': image_ref, + 'delete_on_termination': True, + 'boot_index': boot_index, + 'device_type': 'disk', + 'destination_type': 'local'}) + + +def legacy_mapping(block_device_mapping): + """Transform a list of block devices of an instance back to the + legacy data format.""" + + legacy_block_device_mapping = [] + + for bdm in block_device_mapping: + try: + legacy_block_device = BlockDeviceDict(bdm).legacy() + except exception.InvalidBDMForLegacy: + continue + + legacy_block_device_mapping.append(legacy_block_device) + + # Re-enumerate the ephemeral devices + for i, dev in enumerate(dev for dev in legacy_block_device_mapping + if dev['virtual_name'] and + is_ephemeral(dev['virtual_name'])): + dev['virtual_name'] = dev['virtual_name'][:-1] + str(i) + + return legacy_block_device_mapping + + def properties_root_device_name(properties): """get root device name from image meta data. If it isn't specified, return None. 
@@ -61,7 +236,8 @@ def ephemeral_num(ephemeral_name): def is_swap_or_ephemeral(device_name): - return device_name == 'swap' or is_ephemeral(device_name) + return (device_name and + (device_name == 'swap' or is_ephemeral(device_name))) def mappings_prepend_dev(mappings): diff --git a/nova/cmd/novnc.py b/nova/cmd/novnc.py new file mode 100644 index 000000000..c381984da --- /dev/null +++ b/nova/cmd/novnc.py @@ -0,0 +1,44 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg + +opts = [ + cfg.BoolOpt('record', + default=False, + help='Record sessions to FILE.[session_number]'), + cfg.BoolOpt('daemon', + default=False, + help='Become a daemon (background process)'), + cfg.BoolOpt('ssl_only', + default=False, + help='Disallow non-encrypted connections'), + cfg.BoolOpt('source_is_ipv6', + default=False, + help='Source is ipv6'), + cfg.StrOpt('cert', + default='self.pem', + help='SSL certificate file'), + cfg.StrOpt('key', + default=None, + help='SSL key file (if separate from cert)'), + cfg.StrOpt('web', + default='/usr/share/spice-html5', + help='Run webserver on same port. 
Serve files from DIR.'), + ] + +cfg.CONF.register_cli_opts(opts) diff --git a/nova/cmd/novncproxy.py b/nova/cmd/novncproxy.py index 449aea76e..2abba7c90 100644 --- a/nova/cmd/novncproxy.py +++ b/nova/cmd/novncproxy.py @@ -30,27 +30,6 @@ from nova.console import websocketproxy opts = [ - cfg.BoolOpt('record', - default=False, - help='Record sessions to FILE.[session_number]'), - cfg.BoolOpt('daemon', - default=False, - help='Become a daemon (background process)'), - cfg.BoolOpt('ssl_only', - default=False, - help='Disallow non-encrypted connections'), - cfg.BoolOpt('source_is_ipv6', - default=False, - help='Source is ipv6'), - cfg.StrOpt('cert', - default='self.pem', - help='SSL certificate file'), - cfg.StrOpt('key', - default=None, - help='SSL key file (if separate from cert)'), - cfg.StrOpt('web', - default='/usr/share/novnc', - help='Run webserver on same port. Serve files from DIR.'), cfg.StrOpt('novncproxy_host', default='0.0.0.0', help='Host on which to listen for incoming requests'), @@ -61,11 +40,18 @@ opts = [ CONF = cfg.CONF CONF.register_cli_opts(opts) -CONF.import_opt('debug', 'nova.openstack.common.log') +CONF.import_opt('record', 'nova.cmd.novnc') +CONF.import_opt('daemon', 'nova.cmd.novnc') +CONF.import_opt('ssl_only', 'nova.cmd.novnc') +CONF.import_opt('source_is_ipv6', 'nova.cmd.novnc') +CONF.import_opt('cert', 'nova.cmd.novnc') +CONF.import_opt('key', 'nova.cmd.novnc') +CONF.import_opt('web', 'nova.cmd.novnc') def main(): # Setup flags + CONF.set_defaults(CONF, web='/usr/share/novnc') config.parse_args(sys.argv) if CONF.ssl_only and not os.path.exists(CONF.cert): diff --git a/nova/cmd/spicehtml5proxy.py b/nova/cmd/spicehtml5proxy.py index c6f2be53d..561c6e7aa 100644 --- a/nova/cmd/spicehtml5proxy.py +++ b/nova/cmd/spicehtml5proxy.py @@ -28,29 +28,7 @@ from oslo.config import cfg from nova import config from nova.console import websocketproxy - opts = [ - cfg.BoolOpt('record', - default=False, - help='Record sessions to FILE.[session_number]'), - 
cfg.BoolOpt('daemon', - default=False, - help='Become a daemon (background process)'), - cfg.BoolOpt('ssl_only', - default=False, - help='Disallow non-encrypted connections'), - cfg.BoolOpt('source_is_ipv6', - default=False, - help='Source is ipv6'), - cfg.StrOpt('cert', - default='self.pem', - help='SSL certificate file'), - cfg.StrOpt('key', - default=None, - help='SSL key file (if separate from cert)'), - cfg.StrOpt('web', - default='/usr/share/spice-html5', - help='Run webserver on same port. Serve files from DIR.'), cfg.StrOpt('spicehtml5proxy_host', default='0.0.0.0', help='Host on which to listen for incoming requests'), @@ -59,13 +37,19 @@ opts = [ help='Port on which to listen for incoming requests'), ] +CONF = cfg.CONF +CONF.register_cli_opts(opts) +CONF.import_opt('record', 'nova.cmd.novnc') +CONF.import_opt('daemon', 'nova.cmd.novnc') +CONF.import_opt('ssl_only', 'nova.cmd.novnc') +CONF.import_opt('source_is_ipv6', 'nova.cmd.novnc') +CONF.import_opt('cert', 'nova.cmd.novnc') +CONF.import_opt('key', 'nova.cmd.novnc') +CONF.import_opt('web', 'nova.cmd.novnc') + def main(): # Setup flags - - CONF = cfg.CONF - CONF.register_cli_opts(opts) - CONF.import_opt('debug', 'nova.openstack.common.log') config.parse_args(sys.argv) if CONF.ssl_only and not os.path.exists(CONF.cert): diff --git a/nova/compute/api.py b/nova/compute/api.py index f676c9797..65142aef0 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -539,7 +539,9 @@ class API(base.Base): files_to_inject): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, files_to_inject) - self._check_requested_image(context, image_id, image, instance_type) + if image_id is not None: + self._check_requested_image(context, image_id, + image, instance_type) def _validate_and_build_base_options(self, context, instance_type, image, image_href, image_id, @@ -686,6 +688,26 @@ class API(base.Base): QUOTAS.commit(context, quota_reservations) return instances + def 
_get_volume(self, context, block_device_mapping): + """If we are booting from a volume, we need to get the + volume details from Cinder and make sure we pass the + metadata back accordingly. + """ + if not block_device_mapping: + return {} + + for bdm in block_device_mapping: + if bdm.get('device_name') == "vda": + volume_id = bdm.get('volume_id') + if volume_id is not None: + try: + volume = self.volume_api.get(context, + volume_id) + return volume + except Exception: + raise exception.InvalidBDMVolume(volume_id) + return None + def _create_instance(self, context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, @@ -710,7 +732,13 @@ class API(base.Base): block_device_mapping = block_device_mapping or [] if not instance_type: instance_type = flavors.get_default_instance_type() - image_id, image = self._get_image(context, image_href) + + if image_href: + image_id, image = self._get_image(context, image_href) + else: + image_id = None + image = self._get_volume(context, + block_device_mapping) handle_az = self._handle_availability_zone availability_zone, forced_host, forced_node = handle_az( @@ -832,8 +860,9 @@ class API(base.Base): values) def _validate_bdm(self, context, instance): - for bdm in self.db.block_device_mapping_get_all_by_instance( - context, instance['uuid']): + for bdm in block_device.legacy_mapping( + self.db.block_device_mapping_get_all_by_instance( + context, instance['uuid'])): # NOTE(vish): For now, just make sure the volumes are accessible. # Additionally, check that the volume can be attached to this # instance. @@ -869,6 +898,16 @@ class API(base.Base): continue self._update_block_device_mapping(context, instance_type, instance_uuid, mapping) + # NOTE(ndipanov): Create an image bdm - at the moment + # this is not used but is done for easier transition + # in the future. 
+ if (instance['image_ref'] and not + self.is_volume_backed_instance(context, instance, None)): + image_bdm = block_device.create_image_bdm(instance['image_ref']) + image_bdm['instance_uuid'] = instance_uuid + self.db.block_device_mapping_update_or_create(context, + image_bdm, + legacy=False) def _populate_instance_shutdown_terminate(self, instance, image, block_device_mapping): @@ -1077,8 +1116,9 @@ class API(base.Base): return host = instance['host'] - bdms = self.db.block_device_mapping_get_all_by_instance( - context, instance['uuid']) + bdms = block_device.legacy_mapping( + self.db.block_device_mapping_get_all_by_instance( + context, instance['uuid'])) reservations = None if context.is_admin and context.project_id != instance['project_id']: @@ -1653,7 +1693,8 @@ class API(base.Base): properties['root_device_name'] = instance['root_device_name'] properties.update(extra_properties or {}) - bdms = self.get_instance_bdms(context, instance) + bdms = block_device.legacy_mapping( + self.get_instance_bdms(context, instance)) mapping = [] for bdm in bdms: @@ -1726,8 +1767,9 @@ class API(base.Base): return min_ram, min_disk def _get_block_device_info(self, context, instance_uuid): - bdms = self.db.block_device_mapping_get_all_by_instance(context, - instance_uuid) + bdms = block_device.legacy_mapping( + self.db.block_device_mapping_get_all_by_instance(context, + instance_uuid)) block_device_mapping = [] for bdm in bdms: if not bdm['volume_id']: @@ -1844,8 +1886,10 @@ class API(base.Base): # system metadata... and copy in the properties for the new image. 
orig_sys_metadata = _reset_image_metadata() - bdms = self.db.block_device_mapping_get_all_by_instance(context, - instance['uuid']) + bdms = block_device.legacy_mapping( + self.db.block_device_mapping_get_all_by_instance( + context, + instance['uuid'])) self._record_action_start(context, instance, instance_actions.REBUILD) @@ -2180,7 +2224,8 @@ class API(base.Base): def rescue(self, context, instance, rescue_password=None): """Rescue the given instance.""" - bdms = self.get_instance_bdms(context, instance) + bdms = block_device.legacy_mapping( + self.get_instance_bdms(context, instance)) for bdm in bdms: if bdm['volume_id']: volume = self.volume_api.get(context, bdm['volume_id']) @@ -2474,7 +2519,8 @@ class API(base.Base): return True if bdms is None: - bdms = self.get_instance_bdms(context, instance) + bdms = block_device.legacy_mapping( + self.get_instance_bdms(context, instance)) for bdm in bdms: if (block_device.strip_dev(bdm['device_name']) == @@ -2752,24 +2798,18 @@ class AggregateAPI(base.Base): class KeypairAPI(base.Base): - """Sub-set of the Compute Manager API for managing key pairs.""" - def __init__(self, **kwargs): - super(KeypairAPI, self).__init__(**kwargs) + """Subset of the Compute Manager API for managing key pairs.""" - def _validate_keypair_name(self, context, user_id, key_name): - safechars = "_- " + string.digits + string.ascii_letters - clean_value = "".join(x for x in key_name if x in safechars) + def _validate_new_key_pair(self, context, user_id, key_name): + safe_chars = "_- " + string.digits + string.ascii_letters + clean_value = "".join(x for x in key_name if x in safe_chars) if clean_value != key_name: - msg = _("Keypair name contains unsafe characters") - raise exception.InvalidKeypair(explanation=msg) + raise exception.InvalidKeypair( + _("Keypair name contains unsafe characters")) if not 0 < len(key_name) < 256: - msg = _('Keypair name must be between 1 and 255 characters long') - raise exception.InvalidKeypair(explanation=msg) - - 
def import_key_pair(self, context, user_id, key_name, public_key): - """Import a key pair using an existing public key.""" - self._validate_keypair_name(context, user_id, key_name) + raise exception.InvalidKeypair( + _('Keypair name must be between 1 and 255 characters long')) count = QUOTAS.count(context, 'key_pairs', user_id) try: @@ -2777,11 +2817,11 @@ class KeypairAPI(base.Base): except exception.OverQuota: raise exception.KeypairLimitExceeded() - try: - fingerprint = crypto.generate_fingerprint(public_key) - except exception.InvalidKeypair: - msg = _("Keypair data is invalid") - raise exception.InvalidKeypair(explanation=msg) + def import_key_pair(self, context, user_id, key_name, public_key): + """Import a key pair using an existing public key.""" + self._validate_new_key_pair(context, user_id, key_name) + + fingerprint = crypto.generate_fingerprint(public_key) keypair = {'user_id': user_id, 'name': key_name, @@ -2793,13 +2833,7 @@ class KeypairAPI(base.Base): def create_key_pair(self, context, user_id, key_name): """Create a new key pair.""" - self._validate_keypair_name(context, user_id, key_name) - - count = QUOTAS.count(context, 'key_pairs', user_id) - try: - QUOTAS.limit_check(context, key_pairs=count + 1) - except exception.OverQuota: - raise exception.KeypairLimitExceeded() + self._validate_new_key_pair(context, user_id, key_name) private_key, public_key, fingerprint = crypto.generate_key_pair() @@ -2808,6 +2842,7 @@ class KeypairAPI(base.Base): 'fingerprint': fingerprint, 'public_key': public_key, 'private_key': private_key} + self.db.key_pair_create(context, keypair) return keypair @@ -2815,24 +2850,20 @@ class KeypairAPI(base.Base): """Delete a keypair by name.""" self.db.key_pair_destroy(context, user_id, key_name) + def _get_key_pair(self, key_pair): + return {'name': key_pair['name'], + 'public_key': key_pair['public_key'], + 'fingerprint': key_pair['fingerprint']} + def get_key_pairs(self, context, user_id): """List key pairs.""" key_pairs = 
self.db.key_pair_get_all_by_user(context, user_id) - rval = [] - for key_pair in key_pairs: - rval.append({ - 'name': key_pair['name'], - 'public_key': key_pair['public_key'], - 'fingerprint': key_pair['fingerprint'], - }) - return rval + return [self._get_key_pair(k) for k in key_pairs] def get_key_pair(self, context, user_id, key_name): """Get a keypair by name.""" key_pair = self.db.key_pair_get(context, user_id, key_name) - return {'name': key_pair['name'], - 'public_key': key_pair['public_key'], - 'fingerprint': key_pair['fingerprint']} + return self._get_key_pair(key_pair) class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 8a2ddf877..42f8029a5 100755 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -959,6 +959,9 @@ class ComputeManager(manager.SchedulerDependentManager): except exception.InstanceNotFound: # the instance got deleted during the spawn with excutils.save_and_reraise_exception(): + # Make sure the async call finishes + if network_info is not None: + network_info.wait(do_raise=False) try: self._deallocate_network(context, instance) except Exception: @@ -966,6 +969,10 @@ class ComputeManager(manager.SchedulerDependentManager): 'for deleted instance') LOG.exception(msg, instance=instance) except exception.UnexpectedTaskStateError as e: + exc_info = sys.exc_info() + # Make sure the async call finishes + if network_info is not None: + network_info.wait(do_raise=False) actual_task_state = e.kwargs.get('actual', None) if actual_task_state == 'deleting': msg = _('Instance was deleted during spawn.') @@ -973,10 +980,13 @@ class ComputeManager(manager.SchedulerDependentManager): raise exception.BuildAbortException( instance_uuid=instance['uuid'], reason=msg) else: - raise + raise exc_info[0], exc_info[1], exc_info[2] except Exception: exc_info = sys.exc_info() # try to re-schedule instance: + # Make sure the async call finishes + if network_info is not 
None: + network_info.wait(do_raise=False) rescheduled = self._reschedule_or_error(context, instance, exc_info, requested_networks, admin_password, injected_files_orig, is_first_time, request_spec, @@ -1111,29 +1121,37 @@ class ComputeManager(manager.SchedulerDependentManager): def _allocate_network(self, context, instance, requested_networks, macs, security_groups): - """Allocate networks for an instance and return the network info.""" + """Start network allocation asynchronously. Return an instance + of NetworkInfoAsyncWrapper that can be used to retrieve the + allocated networks when the operation has finished. + """ + # NOTE(comstud): Since we're allocating networks asynchronously, + # this task state has little meaning, as we won't be in this + # state for very long. instance = self._instance_update(context, instance['uuid'], vm_state=vm_states.BUILDING, task_state=task_states.NETWORKING, expected_task_state=None) is_vpn = pipelib.is_vpn_image(instance['image_ref']) - try: - # allocate and get network info - network_info = self.network_api.allocate_for_instance( - context, instance, vpn=is_vpn, - requested_networks=requested_networks, - macs=macs, - conductor_api=self.conductor_api, - security_groups=security_groups) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.exception(_('Instance failed network setup'), - instance=instance) - - LOG.debug(_('Instance network_info: |%s|'), network_info, - instance=instance) - return network_info + def async_alloc(): + LOG.debug(_("Allocating IP information in the background."), + instance=instance) + try: + nwinfo = self.network_api.allocate_for_instance( + context, instance, vpn=is_vpn, + requested_networks=requested_networks, + macs=macs, + conductor_api=self.conductor_api, + security_groups=security_groups) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(_('Instance failed network setup'), + instance=instance) + LOG.debug(_('Instance network_info: |%s|'), nwinfo, 
+ instance=instance) + return nwinfo + return network_model.NetworkInfoAsyncWrapper(async_alloc) def _prep_block_device(self, context, instance, bdms): """Set up the block device for an instance with error logging.""" @@ -1279,6 +1297,16 @@ class ComputeManager(manager.SchedulerDependentManager): admin_password, is_first_time, node, instance) do_run_instance() + def _try_deallocate_network(self, context, instance): + try: + # tear down allocated network structure + self._deallocate_network(context, instance) + except Exception: + with excutils.save_and_reraise_exception(): + LOG.error(_('Failed to deallocate network for instance.'), + instance=instance) + self._set_instance_error_state(context, instance['uuid']) + def _shutdown_instance(self, context, instance, bdms): """Shutdown an instance on this host.""" context = context.elevated() @@ -1293,21 +1321,28 @@ class ComputeManager(manager.SchedulerDependentManager): except exception.NetworkNotFound: network_info = network_model.NetworkInfo() - try: - # tear down allocated network structure - self._deallocate_network(context, instance) - except Exception: - with excutils.save_and_reraise_exception(): - LOG.error(_('Failed to deallocate network for instance.'), - instance=instance) - self._set_instance_error_state(context, instance['uuid']) - # NOTE(vish) get bdms before destroying the instance vol_bdms = self._get_volume_bdms(bdms) block_device_info = self._get_instance_volume_block_device_info( context, instance, bdms=bdms) - self.driver.destroy(instance, self._legacy_nw_info(network_info), - block_device_info) + + # NOTE(melwitt): attempt driver destroy before releasing ip, may + # want to keep ip allocated for certain failures + try: + self.driver.destroy(instance, self._legacy_nw_info(network_info), + block_device_info) + except exception.InstancePowerOffFailure: + # if the instance can't power off, don't release the ip + with excutils.save_and_reraise_exception(): + pass + except Exception: + with 
excutils.save_and_reraise_exception(): + # deallocate ip and fail without proceeding to + # volume api calls, preserving current behavior + self._try_deallocate_network(context, instance) + + self._try_deallocate_network(context, instance) + for bdm in vol_bdms: try: # NOTE(vish): actual driver detach done in driver.destroy, so @@ -1341,6 +1376,7 @@ class ComputeManager(manager.SchedulerDependentManager): as necessary. """ instance_uuid = instance['uuid'] + image = instance['image_ref'] if context.is_admin and context.project_id != instance['project_id']: project_id = instance['project_id'] @@ -1394,6 +1430,15 @@ class ComputeManager(manager.SchedulerDependentManager): self._quota_commit(context, reservations, project_id=project_id) # ensure block device mappings are not leaked self.conductor_api.block_device_mapping_destroy(context, bdms) + # NOTE(ndipanov): Delete the dummy image BDM as well. This will not + # be needed once the manager code is using the image + if image: + # Do not convert to legacy here - we want them all + leftover_bdm = \ + self.conductor_api.block_device_mapping_get_all_by_instance( + context, instance) + self.conductor_api.block_device_mapping_destroy(context, + leftover_bdm) self._notify_about_instance_usage(context, instance, "delete.end", system_metadata=system_meta) diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index a8d7eaa47..2305810a9 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -422,9 +422,10 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy): filter_properties=None, node=None): instance_p = jsonutils.to_primitive(instance) instance_type_p = jsonutils.to_primitive(instance_type) + image_p = jsonutils.to_primitive(image) self.cast(ctxt, self.make_msg('prep_resize', instance=instance_p, instance_type=instance_type_p, - image=image, reservations=reservations, + image=image_p, reservations=reservations, request_spec=request_spec, filter_properties=filter_properties, node=node), diff --git 
a/nova/conductor/api.py b/nova/conductor/api.py index 33415233e..74b8ce700 100644 --- a/nova/conductor/api.py +++ b/nova/conductor/api.py @@ -196,9 +196,10 @@ class LocalAPI(object): return self._manager.block_device_mapping_update_or_create(context, values) - def block_device_mapping_get_all_by_instance(self, context, instance): + def block_device_mapping_get_all_by_instance(self, context, instance, + legacy=True): return self._manager.block_device_mapping_get_all_by_instance( - context, instance) + context, instance, legacy) def block_device_mapping_destroy(self, context, bdms): return self._manager.block_device_mapping_destroy(context, bdms=bdms) @@ -361,11 +362,11 @@ class LocalComputeTaskAPI(object): block_device_mapping=block_device_mapping) -class API(object): +class API(LocalAPI): """Conductor API that does updates via RPC to the ConductorManager.""" def __init__(self): - self.conductor_rpcapi = rpcapi.ConductorAPI() + self._manager = rpcapi.ConductorAPI() self.base_rpcapi = baserpc.BaseAPI(topic=CONF.conductor.topic) def wait_until_ready(self, context, early_timeout=10, early_attempts=10): @@ -400,296 +401,8 @@ class API(object): def instance_update(self, context, instance_uuid, **updates): """Perform an instance update in the database.""" - return self.conductor_rpcapi.instance_update(context, instance_uuid, - updates, 'conductor') - - def instance_destroy(self, context, instance): - return self.conductor_rpcapi.instance_destroy(context, instance) - - def instance_get(self, context, instance_id): - return self.conductor_rpcapi.instance_get(context, instance_id) - - def instance_get_by_uuid(self, context, instance_uuid, - columns_to_join=None): - return self.conductor_rpcapi.instance_get_by_uuid(context, - instance_uuid, - columns_to_join) - - def instance_get_all_by_host(self, context, host, columns_to_join=None): - return self.conductor_rpcapi.instance_get_all_by_host( - context, host, columns_to_join=columns_to_join) - - def 
instance_get_all_by_host_and_node(self, context, host, node): - return self.conductor_rpcapi.instance_get_all_by_host(context, - host, node) - - def instance_get_all_by_filters(self, context, filters, - sort_key='created_at', - sort_dir='desc', - columns_to_join=None): - return self.conductor_rpcapi.instance_get_all_by_filters( - context, filters, sort_key, sort_dir, columns_to_join) - - def instance_get_active_by_window_joined(self, context, begin, end=None, - project_id=None, host=None): - return self.conductor_rpcapi.instance_get_active_by_window_joined( - context, begin, end, project_id, host) - - def instance_info_cache_update(self, context, instance, values): - return self.conductor_rpcapi.instance_info_cache_update(context, - instance, values) - - def instance_info_cache_delete(self, context, instance): - return self.conductor_rpcapi.instance_info_cache_delete(context, - instance) - - def instance_type_get(self, context, instance_type_id): - return self.conductor_rpcapi.instance_type_get(context, - instance_type_id) - - def instance_fault_create(self, context, values): - return self.conductor_rpcapi.instance_fault_create(context, values) - - def migration_get(self, context, migration_id): - return self.conductor_rpcapi.migration_get(context, migration_id) - - def migration_get_unconfirmed_by_dest_compute(self, context, - confirm_window, - dest_compute): - crpcapi = self.conductor_rpcapi - return crpcapi.migration_get_unconfirmed_by_dest_compute( - context, confirm_window, dest_compute) - - def migration_get_in_progress_by_host_and_node(self, context, host, node): - crpcapi = self.conductor_rpcapi - return crpcapi.migration_get_in_progress_by_host_and_node(context, - host, node) - - def migration_create(self, context, instance, values): - return self.conductor_rpcapi.migration_create(context, instance, - values) - - def migration_update(self, context, migration, status): - return self.conductor_rpcapi.migration_update(context, migration, - status) - - def 
aggregate_host_add(self, context, aggregate, host): - return self.conductor_rpcapi.aggregate_host_add(context, aggregate, - host) - - def aggregate_host_delete(self, context, aggregate, host): - return self.conductor_rpcapi.aggregate_host_delete(context, aggregate, - host) - - def aggregate_get(self, context, aggregate_id): - return self.conductor_rpcapi.aggregate_get(context, aggregate_id) - - def aggregate_get_by_host(self, context, host, key=None): - return self.conductor_rpcapi.aggregate_get_by_host(context, host, key) - - def aggregate_metadata_add(self, context, aggregate, metadata, - set_delete=False): - return self.conductor_rpcapi.aggregate_metadata_add(context, aggregate, - metadata, - set_delete) - - def aggregate_metadata_delete(self, context, aggregate, key): - return self.conductor_rpcapi.aggregate_metadata_delete(context, - aggregate, - key) - - def aggregate_metadata_get_by_host(self, context, host, - key='availability_zone'): - return self.conductor_rpcapi.aggregate_metadata_get_by_host(context, - host, - key) - - def bw_usage_get(self, context, uuid, start_period, mac): - return self.conductor_rpcapi.bw_usage_update(context, uuid, mac, - start_period) - - def bw_usage_update(self, context, uuid, mac, start_period, - bw_in, bw_out, last_ctr_in, last_ctr_out, - last_refreshed=None): - return self.conductor_rpcapi.bw_usage_update( - context, uuid, mac, start_period, - bw_in, bw_out, last_ctr_in, last_ctr_out, - last_refreshed) - - def security_group_get_by_instance(self, context, instance): - return self.conductor_rpcapi.security_group_get_by_instance(context, - instance) - - def security_group_rule_get_by_security_group(self, context, secgroup): - return self.conductor_rpcapi.security_group_rule_get_by_security_group( - context, secgroup) - - def provider_fw_rule_get_all(self, context): - return self.conductor_rpcapi.provider_fw_rule_get_all(context) - - def agent_build_get_by_triple(self, context, hypervisor, os, architecture): - return 
self.conductor_rpcapi.agent_build_get_by_triple(context, - hypervisor, - os, - architecture) - - def block_device_mapping_create(self, context, values): - return self.conductor_rpcapi.block_device_mapping_update_or_create( - context, values, create=True) - - def block_device_mapping_update(self, context, bdm_id, values): - values = dict(values) - values['id'] = bdm_id - return self.conductor_rpcapi.block_device_mapping_update_or_create( - context, values, create=False) - - def block_device_mapping_update_or_create(self, context, values): - return self.conductor_rpcapi.block_device_mapping_update_or_create( - context, values) - - def block_device_mapping_get_all_by_instance(self, context, instance): - return self.conductor_rpcapi.block_device_mapping_get_all_by_instance( - context, instance) - - def block_device_mapping_destroy(self, context, bdms): - return self.conductor_rpcapi.block_device_mapping_destroy(context, - bdms=bdms) - - def block_device_mapping_destroy_by_instance_and_device(self, context, - instance, - device_name): - return self.conductor_rpcapi.block_device_mapping_destroy( - context, instance=instance, device_name=device_name) - - def block_device_mapping_destroy_by_instance_and_volume(self, context, - instance, - volume_id): - return self.conductor_rpcapi.block_device_mapping_destroy( - context, instance=instance, volume_id=volume_id) - - def vol_get_usage_by_time(self, context, start_time): - return self.conductor_rpcapi.vol_get_usage_by_time(context, start_time) - - def vol_usage_update(self, context, vol_id, rd_req, rd_bytes, wr_req, - wr_bytes, instance, last_refreshed=None, - update_totals=False): - return self.conductor_rpcapi.vol_usage_update(context, vol_id, - rd_req, rd_bytes, - wr_req, wr_bytes, - instance, last_refreshed, - update_totals) - - def service_get_all(self, context): - return self.conductor_rpcapi.service_get_all_by(context) - - def service_get_all_by_topic(self, context, topic): - return 
self.conductor_rpcapi.service_get_all_by(context, topic=topic) - - def service_get_all_by_host(self, context, host): - return self.conductor_rpcapi.service_get_all_by(context, host=host) - - def service_get_by_host_and_topic(self, context, host, topic): - return self.conductor_rpcapi.service_get_all_by(context, topic, host) - - def service_get_by_compute_host(self, context, host): - result = self.conductor_rpcapi.service_get_all_by(context, 'compute', - host) - # FIXME(comstud): A major revision bump to 2.0 should return a - # single entry, so we should just return 'result' at that point. - return result[0] - - def service_get_by_args(self, context, host, binary): - return self.conductor_rpcapi.service_get_all_by(context, host=host, - binary=binary) - - def action_event_start(self, context, values): - return self.conductor_rpcapi.action_event_start(context, values) - - def action_event_finish(self, context, values): - return self.conductor_rpcapi.action_event_finish(context, values) - - def service_create(self, context, values): - return self.conductor_rpcapi.service_create(context, values) - - def service_destroy(self, context, service_id): - return self.conductor_rpcapi.service_destroy(context, service_id) - - def compute_node_create(self, context, values): - return self.conductor_rpcapi.compute_node_create(context, values) - - def compute_node_update(self, context, node, values, prune_stats=False): - return self.conductor_rpcapi.compute_node_update(context, node, - values, prune_stats) - - def compute_node_delete(self, context, node): - return self.conductor_rpcapi.compute_node_delete(context, node) - - def service_update(self, context, service, values): - return self.conductor_rpcapi.service_update(context, service, values) - - def task_log_get(self, context, task_name, begin, end, host, state=None): - return self.conductor_rpcapi.task_log_get(context, task_name, begin, - end, host, state) - - def task_log_begin_task(self, context, task_name, begin, end, host, 
- task_items=None, message=None): - return self.conductor_rpcapi.task_log_begin_task(context, task_name, - begin, end, host, - task_items, message) - - def task_log_end_task(self, context, task_name, begin, end, host, - errors, message=None): - return self.conductor_rpcapi.task_log_end_task(context, task_name, - begin, end, host, - errors, message) - - def notify_usage_exists(self, context, instance, current_period=False, - ignore_missing_network_data=True, - system_metadata=None, extra_usage_info=None): - return self.conductor_rpcapi.notify_usage_exists( - context, instance, current_period, ignore_missing_network_data, - system_metadata, extra_usage_info) - - def security_groups_trigger_handler(self, context, event, *args): - return self.conductor_rpcapi.security_groups_trigger_handler(context, - event, - args) - - def security_groups_trigger_members_refresh(self, context, group_ids): - return self.conductor_rpcapi.security_groups_trigger_members_refresh( - context, group_ids) - - def network_migrate_instance_start(self, context, instance, migration): - return self.conductor_rpcapi.network_migrate_instance_start(context, - instance, - migration) - - def network_migrate_instance_finish(self, context, instance, migration): - return self.conductor_rpcapi.network_migrate_instance_finish(context, - instance, - migration) - - def quota_commit(self, context, reservations, project_id=None): - return self.conductor_rpcapi.quota_commit(context, reservations, - project_id=project_id) - - def quota_rollback(self, context, reservations, project_id=None): - return self.conductor_rpcapi.quota_rollback(context, reservations, - project_id=project_id) - - def get_ec2_ids(self, context, instance): - return self.conductor_rpcapi.get_ec2_ids(context, instance) - - def compute_stop(self, context, instance, do_cast=True): - return self.conductor_rpcapi.compute_stop(context, instance, do_cast) - - def compute_confirm_resize(self, context, instance, migration_ref): - return 
self.conductor_rpcapi.compute_confirm_resize(context, - instance, - migration_ref) - - def compute_unrescue(self, context, instance): - return self.conductor_rpcapi.compute_unrescue(context, instance) + return self._manager.instance_update(context, instance_uuid, + updates, 'conductor') class ComputeTaskAPI(object): diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index bd4268963..6eccaf341 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -17,6 +17,7 @@ import copy from nova.api.ec2 import ec2utils +from nova import block_device from nova.compute import api as compute_api from nova.compute import utils as compute_utils from nova import exception @@ -66,7 +67,7 @@ class ConductorManager(manager.Manager): namespace. See the ComputeTaskManager class for details. """ - RPC_API_VERSION = '1.50' + RPC_API_VERSION = '1.51' def __init__(self, *args, **kwargs): super(ConductorManager, self).__init__(service_name='conductor', @@ -264,9 +265,12 @@ class ConductorManager(manager.Manager): else: self.db.block_device_mapping_update(context, values['id'], values) - def block_device_mapping_get_all_by_instance(self, context, instance): + def block_device_mapping_get_all_by_instance(self, context, instance, + legacy=True): bdms = self.db.block_device_mapping_get_all_by_instance( context, instance['uuid']) + if legacy: + bdms = block_device.legacy_mapping(bdms) return jsonutils.to_primitive(bdms) def block_device_mapping_destroy(self, context, bdms=None, diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py index e1f65fae2..c9b1c0af7 100644 --- a/nova/conductor/rpcapi.py +++ b/nova/conductor/rpcapi.py @@ -90,6 +90,8 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy): 1.48 - Added compute_unrescue 1.49 - Added columns_to_join to instance_get_by_uuid 1.50 - Added object_action() and object_class_action() + 1.51 - Added the 'legacy' argument to + block_device_mapping_get_all_by_instance """ BASE_RPC_API_VERSION = '1.0' 
@@ -230,11 +232,12 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy): values=values, create=create) return self.call(context, msg, version='1.12') - def block_device_mapping_get_all_by_instance(self, context, instance): + def block_device_mapping_get_all_by_instance(self, context, instance, + legacy=True): instance_p = jsonutils.to_primitive(instance) msg = self.make_msg('block_device_mapping_get_all_by_instance', - instance=instance_p) - return self.call(context, msg, version='1.13') + instance=instance_p, legacy=legacy) + return self.call(context, msg, version='1.51') def block_device_mapping_destroy(self, context, bdms=None, instance=None, volume_id=None, diff --git a/nova/db/api.py b/nova/db/api.py index 78e2eb7a4..8a7c6dc48 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1013,20 +1013,20 @@ def ec2_snapshot_create(context, snapshot_id, forced_id=None): #################### -def block_device_mapping_create(context, values): +def block_device_mapping_create(context, values, legacy=True): """Create an entry of block device mapping.""" - return IMPL.block_device_mapping_create(context, values) + return IMPL.block_device_mapping_create(context, values, legacy) -def block_device_mapping_update(context, bdm_id, values): +def block_device_mapping_update(context, bdm_id, values, legacy=True): """Update an entry of block device mapping.""" - return IMPL.block_device_mapping_update(context, bdm_id, values) + return IMPL.block_device_mapping_update(context, bdm_id, values, legacy) -def block_device_mapping_update_or_create(context, values): +def block_device_mapping_update_or_create(context, values, legacy=True): """Update an entry of block device mapping. 
If not existed, create a new entry""" - return IMPL.block_device_mapping_update_or_create(context, values) + return IMPL.block_device_mapping_update_or_create(context, values, legacy) def block_device_mapping_get_all_by_instance(context, instance_uuid): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index af9486b3e..adacc6ead 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3122,24 +3122,35 @@ def _scrub_empty_str_values(dct, keys_to_scrub): del dct[key] +def _from_legacy_values(values, legacy, allow_updates=False): + if legacy: + if allow_updates and block_device.is_safe_for_update(values): + return values + else: + return block_device.BlockDeviceDict.from_legacy(values) + else: + return values + + @require_context -def block_device_mapping_create(context, values): +def block_device_mapping_create(context, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) + values = _from_legacy_values(values, legacy) bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save() @require_context -def block_device_mapping_update(context, bdm_id, values): +def block_device_mapping_update(context, bdm_id, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) + values = _from_legacy_values(values, legacy, allow_updates=True) _block_device_mapping_get_query(context).\ filter_by(id=bdm_id).\ update(values) -@require_context -def block_device_mapping_update_or_create(context, values): +def block_device_mapping_update_or_create(context, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) session = get_session() with session.begin(): @@ -3148,24 +3159,32 @@ def block_device_mapping_update_or_create(context, values): filter_by(device_name=values['device_name']).\ first() if not result: + values = _from_legacy_values(values, legacy) bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save(session=session) else: + values = _from_legacy_values(values, legacy, 
allow_updates=True) result.update(values) # NOTE(yamahata): same virtual device name can be specified multiple # times. So delete the existing ones. - virtual_name = values['virtual_name'] - if (virtual_name is not None and - block_device.is_swap_or_ephemeral(virtual_name)): - - _block_device_mapping_get_query(context, session=session).\ - filter_by(instance_uuid=values['instance_uuid']).\ - filter_by(virtual_name=virtual_name).\ + # TODO(ndipanov): Just changed to use new format for now - + # should be moved out of db layer or removed completely + if values.get('source_type') == 'blank': + is_swap = values.get('guest_format') == 'swap' + query = (_block_device_mapping_get_query(context, session=session). + filter_by(instance_uuid=values['instance_uuid']). + filter_by(source_type='blank'). filter(models.BlockDeviceMapping.device_name != - values['device_name']).\ - soft_delete() + values['device_name'])) + if is_swap: + query.filter_by(guest_format='swap').soft_delete() + else: + (query.filter(or_( + models.BlockDeviceMapping.guest_format == None, + models.BlockDeviceMapping.guest_format != 'swap')). + soft_delete()) @require_context diff --git a/nova/db/sqlalchemy/migrate_repo/versions/185_rename_unique_constraints.py b/nova/db/sqlalchemy/migrate_repo/versions/185_rename_unique_constraints.py new file mode 100644 index 000000000..af7fd1f79 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/185_rename_unique_constraints.py @@ -0,0 +1,128 @@ +# Copyright 2013 Mirantis Inc. +# All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# +# @author: Victor Sergeyev, Mirantis Inc. +# +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +from migrate.changeset import UniqueConstraint +from migrate import ForeignKeyConstraint +from sqlalchemy import MetaData, Table + +from nova.db.sqlalchemy import utils + +UC_DATA = { + # (table_name: ((columns,), old_uc_name_1), (columns,), old_uc_name_2) + "floating_ips": ( + (("address", "deleted",), "uniq_address_x_deleted"), + ), + "instance_type_projects": ( + (("instance_type_id", "project_id", "deleted"), + "uniq_instance_type_id_x_project_id_x_deleted"), + ), + "instance_types": ( + (("name", "deleted"), "uniq_name_x_deleted"), + (("flavorid", "deleted",), "uniq_flavorid_x_deleted"), + ), + "key_pairs": ( + (("user_id", "name", "deleted"), "key_pairs_uniq_name_and_user_id"), + ), + "networks": ( + (("vlan", "deleted",), "uniq_vlan_x_deleted"), + ), + "task_log": ( + (("task_name", "host", "period_beginning", "period_ending"), + "uniq_task_name_x_host_x_period_beginning_x_period_ending"), + ), +} +# some UC names are different for mysql and postgresql +UC_SPEC_DB_DATA = { + # {engine: {table_name: (((columns,), old_uc_name), (...))}} + "sqlite": { + "instance_info_caches": ( + (("instance_uuid",), "instance_uuid"), + ), + "virtual_interfaces": ( + (("address",), "virtual_interfaces_instance_uuid_fkey"), + ), + }, + "mysql": { + "instance_info_caches": ( + (("instance_uuid",), "instance_uuid"), + ), + "virtual_interfaces": ( + (("address",), "virtual_interfaces_instance_uuid_fkey"), + ), + }, + "postgresql": { + "instance_info_caches": ( + (("instance_uuid",), "instance_info_caches_instance_uuid_key"), + ), + "virtual_interfaces": ( + (("address",), "virtual_interfaces_address_key"), + ), + }, +} + + +constraint_names = { + "instance_info_caches": "instance_info_caches_instance_uuid_fkey", + "virtual_interfaces": "virtual_interfaces_instance_uuid_fkey", +} + + 
+def _uc_rename(migrate_engine, upgrade=True): + UC_DATA.update(UC_SPEC_DB_DATA[migrate_engine.name]) + + meta = MetaData(bind=migrate_engine) + + for table in UC_DATA: + t = Table(table, meta, autoload=True) + + for columns, old_uc_name in UC_DATA[table]: + new_uc_name = "uniq_{0}0{1}".format(table, "0".join(columns)) + + if table in constraint_names and migrate_engine.name == "mysql": + instances = Table("instances", meta, autoload=True) + + ForeignKeyConstraint( + columns=[t.c.instance_uuid], + refcolumns=[instances.c.uuid], + name=constraint_names[table] + ).drop(engine=migrate_engine) + + if upgrade: + old_name, new_name = old_uc_name, new_uc_name + else: + old_name, new_name = new_uc_name, old_uc_name + + utils.drop_unique_constraint(migrate_engine, table, + old_name, *(columns)) + UniqueConstraint(*columns, table=t, name=new_name).create() + + if table in constraint_names and migrate_engine.name == "mysql": + ForeignKeyConstraint( + columns=[t.c.instance_uuid], + refcolumns=[instances.c.uuid], + name=constraint_names[table] + ).create(engine=migrate_engine) + + +def upgrade(migrate_engine): + return _uc_rename(migrate_engine, upgrade=True) + + +def downgrade(migrate_engine): + return _uc_rename(migrate_engine, upgrade=False) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py b/nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py new file mode 100644 index 000000000..bb16d7bbf --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/186_new_bdm_format.py @@ -0,0 +1,262 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import itertools +import re + +from sqlalchemy import Column, Integer, MetaData, String, Table +from sqlalchemy.sql.expression import select + +from nova.openstack.common import log as logging +from oslo.config import cfg + + +CONF = cfg.CONF +CONF.import_opt('default_ephemeral_format', 'nova.virt.driver') +LOG = logging.getLogger(__name__) + + +_ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$') + + +def _is_ephemeral(device_name): + return bool(_ephemeral.match(device_name)) + + +def _is_swap_or_ephemeral(device_name): + return (device_name and + (device_name == 'swap' or _is_ephemeral(device_name))) + + +_dev = re.compile('^/dev/') + + +def strip_dev(device_name): + """remove leading '/dev/'.""" + return _dev.sub('', device_name) if device_name else device_name + + +def upgrade(migrate_engine): + meta = MetaData(bind=migrate_engine) + + for table in ('block_device_mapping', 'shadow_block_device_mapping'): + block_device_mapping = Table(table, + meta, autoload=True) + + source_type = Column('source_type', String(255)) + destination_type = Column('destination_type', String(255)) + guest_format = Column('guest_format', String(255)) + device_type = Column('device_type', String(255)) + disk_bus = Column('disk_bus', String(255)) + boot_index = Column('boot_index', Integer) + image_id = Column('image_id', String(36)) + + source_type.create(block_device_mapping) + destination_type.create(block_device_mapping) + guest_format.create(block_device_mapping) + device_type.create(block_device_mapping) + disk_bus.create(block_device_mapping) + 
boot_index.create(block_device_mapping) + image_id.create(block_device_mapping) + + device_name = block_device_mapping.c.device_name + device_name.alter(nullable=True) + + _upgrade_bdm_v2(meta, block_device_mapping) + + virtual_name = block_device_mapping.c.virtual_name + virtual_name.drop() + + +def downgrade(migrate_engine): + meta = MetaData(bind=migrate_engine) + + for table in ('block_device_mapping', 'shadow_block_device_mapping'): + block_device_mapping = Table(table, meta, autoload=True) + + virtual_name = Column('virtual_name', String(255), nullable=True) + virtual_name.create(block_device_mapping) + + _downgrade_bdm_v2(meta, block_device_mapping) + + device_name = block_device_mapping.c.device_name + device_name.alter(nullable=True) + + block_device_mapping.c.source_type.drop() + block_device_mapping.c.destination_type.drop() + block_device_mapping.c.guest_format.drop() + block_device_mapping.c.device_type.drop() + block_device_mapping.c.disk_bus.drop() + block_device_mapping.c.boot_index.drop() + block_device_mapping.c.image_id.drop() + + +def _upgrade_bdm_v2(meta, bdm_table): + # Rows needed to do the upgrade + _bdm_rows_v1 = ('id', 'device_name', 'virtual_name', + 'snapshot_id', 'volume_id', 'instance_uuid') + + _bdm_rows_v2 = ('id', 'source_type', 'destination_type', 'guest_format', + 'device_type', 'disk_bus', 'boot_index', 'image_id') + + def _get_columns(table, names): + return [getattr(table.c, name) for name in names] + + def _default_bdm(): + # Set some common default values + default = {} + default['destination_type'] = 'local' + default['device_type'] = 'disk' + default['boot_index'] = -1 + return default + + instance_table = Table('instances', meta, autoload=True) + instance_shadow_table = Table('shadow_instances', meta, autoload=True) + + for instance in itertools.chain( + instance_table.select().execute().fetchall(), + instance_shadow_table.select().execute().fetchall()): + # Get all the bdms for an instance + bdm_q = 
select(_get_columns(bdm_table, _bdm_rows_v1)).where( + bdm_table.c.instance_uuid == instance.uuid) + + bdms_v1 = [val for val in bdm_q.execute().fetchall()] + bdms_v2 = [] + image_bdm = None + + for bdm in bdms_v1: + bdm_v2 = _default_bdm() + # Copy over some fields we'll need + bdm_v2['id'] = bdm['id'] + bdm_v2['device_name'] = bdm['device_name'] + + virt_name = bdm.virtual_name + if _is_swap_or_ephemeral(virt_name): + bdm_v2['source_type'] = 'blank' + + if virt_name == 'swap': + bdm_v2['guest_format'] = 'swap' + else: + bdm_v2['guest_format'] = CONF.default_ephemeral_format + + bdms_v2.append(bdm_v2) + + elif bdm.snapshot_id: + bdm_v2['source_type'] = 'snapshot' + bdm_v2['destination_type'] = 'volume' + + bdms_v2.append(bdm_v2) + + elif bdm.volume_id: + bdm_v2['source_type'] = 'volume' + bdm_v2['destination_type'] = 'volume' + + bdms_v2.append(bdm_v2) + else: # Log a warning that the bdm is not as expected + LOG.warn("Got an unexpected block device %s" + "that cannot be converted to v2 format" % bdm) + + if instance.image_ref: + image_bdm = _default_bdm() + image_bdm['source_type'] = 'image' + image_bdm['instance_uuid'] = instance.uuid + image_bdm['image_id'] = instance.image_ref + + # NOTE (ndipanov): Mark only the image or the bootable volume + # with boot index, as we don't support it yet. + # Also, make sure that instances started with + # the old syntax of specifying an image *and* + # a bootable volume still have consistent data. + bootable = [bdm for bdm in bdms_v2 + if strip_dev(bdm['device_name']) == + strip_dev(instance.root_device_name) + and bdm['source_type'] != 'blank'] + + if len(bootable) > 1: + LOG.warn("Found inconsistent block device data for " + "instance %s - non-unique bootable device." + % instance.uuid) + if bootable: + bootable[0]['boot_index'] = 0 + elif instance.image_ref: + image_bdm['boot_index'] = 0 + else: + LOG.warn("No bootable device found for instance %s."
+ % instance.uuid) + + # Update the DB + if image_bdm: + bdm_table.insert().values(**image_bdm).execute() + + for bdm in bdms_v2: + bdm_table.update().where( + bdm_table.c.id == bdm['id'] + ).values(**bdm).execute() + + +def _downgrade_bdm_v2(meta, bdm_table): + # First delete all the image bdms + + # NOTE (ndipanov): This will delete all the image bdms, even the ones + # that were potentially created as part of the normal + # operation, not only the upgrade. We have to do it, + # as we have no way of handling them in the old code. + bdm_table.delete().where(bdm_table.c.source_type == 'image').execute() + + # NOTE (ndipanov): Set all NULL device_names (if any) to '' and let the + # Nova code deal with that. This is needed so that the + # return of nullable=True does not break, and should + # happen only if there are instances that are just + # starting up when we do the downgrade + bdm_table.update().where( + bdm_table.c.device_name == None + ).values(device_name='').execute() + + instance = Table('instances', meta, autoload=True) + instance_shadow = Table('shadow_instances', meta, autoload=True) + instance_q = select([instance.c.uuid]) + instance_shadow_q = select([instance_shadow.c.uuid]) + + for instance_uuid, in itertools.chain( + instance_q.execute().fetchall(), + instance_shadow_q.execute().fetchall()): + # Get all the bdms for an instance + bdm_q = select( + [bdm_table.c.id, bdm_table.c.source_type, bdm_table.c.guest_format] + ).where( + (bdm_table.c.instance_uuid == instance_uuid) & + (bdm_table.c.source_type == 'blank') + ).order_by(bdm_table.c.id.asc()) + + blanks = [ + dict(zip(('id', 'source', 'format'), row)) + for row in bdm_q.execute().fetchall() + ] + + swap = [dev for dev in blanks if dev['format'] == 'swap'] + assert len(swap) < 2 + ephemerals = [dev for dev in blanks if dev not in swap] + + for index, eph in enumerate(ephemerals): + eph['virtual_name'] = 'ephemeral' + str(index) + + if swap: + swap[0]['virtual_name'] = 'swap' + + for bdm in swap +
ephemerals: + bdm_table.update().where( + bdm_table.c.id == bdm['id'] + ).values(**bdm).execute() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f10bc8c32..386fcbdad 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -45,10 +45,12 @@ class Service(BASE, NovaBase): """Represents a running service on a host.""" __tablename__ = 'services' + __table_args__ = () + id = Column(Integer, primary_key=True) - host = Column(String(255)) # , ForeignKey('hosts.id')) - binary = Column(String(255)) - topic = Column(String(255)) + host = Column(String(255), nullable=True) # , ForeignKey('hosts.id')) + binary = Column(String(255), nullable=True) + topic = Column(String(255), nullable=True) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) @@ -246,6 +248,10 @@ class InstanceInfoCache(BASE, NovaBase): Represents a cache of information about an instance """ __tablename__ = 'instance_info_caches' + __table_args__ = ( + schema.UniqueConstraint( + "instance_uuid", + name="uniq_instance_info_caches0instance_uuid"),) id = Column(Integer, primary_key=True, autoincrement=True) # text column used for storing a json object of network data for api @@ -262,6 +268,14 @@ class InstanceInfoCache(BASE, NovaBase): class InstanceTypes(BASE, NovaBase): """Represent possible instance_types or flavor of VM offered.""" __tablename__ = "instance_types" + + __table_args__ = ( + schema.UniqueConstraint("flavorid", "deleted", + name="uniq_instance_types0flavorid0deleted"), + schema.UniqueConstraint("name", "deleted", + name="uniq_instance_types0name0deleted") + ) + id = Column(Integer, primary_key=True) name = Column(String(255)) memory_mb = Column(Integer) @@ -432,7 +446,16 @@ class BlockDeviceMapping(BASE, NovaBase): 'Instance.uuid,' 'BlockDeviceMapping.deleted==' '0)') - device_name = Column(String(255), nullable=False) + + source_type = Column(String(255)) + destination_type = Column(String(255)) 
+ guest_format = Column(String(255)) + device_type = Column(String(255)) + disk_bus = Column(String(255)) + + boot_index = Column(Integer) + + device_name = Column(String(255)) # default=False for compatibility of the existing code. # With EC2 API, @@ -440,14 +463,13 @@ class BlockDeviceMapping(BASE, NovaBase): # default False for created with other timing. delete_on_termination = Column(Boolean, default=False) - # for ephemeral device - virtual_name = Column(String(255), nullable=True) - snapshot_id = Column(String(36)) volume_id = Column(String(36), nullable=True) volume_size = Column(Integer, nullable=True) + image_id = Column('image_id', String(36)) + # for no device to suppress devices. no_device = Column(Boolean, nullable=True) @@ -552,7 +574,10 @@ class ProviderFirewallRule(BASE, NovaBase): class KeyPair(BASE, NovaBase): """Represents a public key pair for ssh.""" __tablename__ = 'key_pairs' - __table_args__ = (schema.UniqueConstraint("name", "user_id"), ) + __table_args__ = ( + schema.UniqueConstraint("name", "user_id", "deleted", + name="uniq_key_pairs0user_id0name0deleted"), + ) id = Column(Integer, primary_key=True) name = Column(String(255)) @@ -591,8 +616,11 @@ class Migration(BASE, NovaBase): class Network(BASE, NovaBase): """Represents a network.""" __tablename__ = 'networks' - __table_args__ = (schema.UniqueConstraint("vpn_public_address", - "vpn_public_port"), ) + __table_args__ = ( + schema.UniqueConstraint("vlan", "deleted", + name="uniq_networks0vlan0deleted"), + ) + id = Column(Integer, primary_key=True) label = Column(String(255)) @@ -628,6 +656,10 @@ class Network(BASE, NovaBase): class VirtualInterface(BASE, NovaBase): """Represents a virtual interface on an instance.""" __tablename__ = 'virtual_interfaces' + __table_args__ = ( + schema.UniqueConstraint("address", + name="unique_virtual_interfaces0address"), + ) id = Column(Integer, primary_key=True) address = Column(String(255), unique=True) network_id = Column(Integer, nullable=False) @@ 
-669,6 +701,10 @@ class FixedIp(BASE, NovaBase): class FloatingIp(BASE, NovaBase): """Represents a floating ip that dynamically forwards to a fixed ip.""" __tablename__ = 'floating_ips' + __table_args__ = ( + schema.UniqueConstraint("address", "deleted", + name="uniq_floating_ips0address0deleted"), + ) id = Column(Integer, primary_key=True) address = Column(types.IPAddress()) fixed_ip_id = Column(Integer, nullable=True) @@ -757,6 +793,11 @@ class InstanceSystemMetadata(BASE, NovaBase): class InstanceTypeProjects(BASE, NovaBase): """Represent projects associated instance_types.""" __tablename__ = "instance_type_projects" + __table_args__ = (schema.UniqueConstraint( + "instance_type_id", "project_id", "deleted", + name="uniq_instance_type_projects0instance_type_id0project_id0deleted" + ), + ) id = Column(Integer, primary_key=True) instance_type_id = Column(Integer, ForeignKey('instance_types.id'), nullable=False) @@ -983,6 +1024,12 @@ class InstanceIdMapping(BASE, NovaBase): class TaskLog(BASE, NovaBase): """Audit log for background periodic tasks.""" __tablename__ = 'task_log' + __table_args__ = ( + schema.UniqueConstraint( + 'task_name', 'host', 'period_beginning', 'period_ending', + name="uniq_task_log0task_name0host0period_beginning0period_ending" + ), + ) id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) task_name = Column(String(255), nullable=False) state = Column(String(255), nullable=False) diff --git a/nova/exception.py b/nova/exception.py index bbe5442f1..905ddf2da 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -221,6 +221,17 @@ class InvalidBDMVolume(InvalidBDM): "failed to get volume %(id)s.") +class InvalidBDMFormat(InvalidBDM): + message = _("Block Device Mapping is Invalid: " + "some fields are not recognized, " + "or have invalid values.") + + +class InvalidBDMForLegacy(InvalidBDM): + message = _("Block Device Mapping cannot " + "be converted to legacy format. 
") + + class VolumeUnattached(Invalid): message = _("Volume %(volume_id)s is not attached to anything") @@ -895,7 +906,7 @@ class RotationRequiredForBackup(NovaException): class KeyPairExists(Duplicate): - message = _("Key pair %(key_name)s already exists.") + message = _("Key pair '%(key_name)s' already exists.") class InstanceExists(Duplicate): @@ -1167,7 +1178,7 @@ class InstanceRecreateNotSupported(Invalid): class ServiceGroupUnavailable(NovaException): - message = _("The service from servicegroup driver %(driver) is " + message = _("The service from servicegroup driver %(driver)s is " "temporarily unavailable.") @@ -1220,3 +1231,7 @@ class OrphanedObjectError(NovaException): class IncompatibleObjectVersion(NovaException): message = _('Version %(objver)s of %(objname)s is not supported') + + +class CoreAPIMissing(NovaException): + message = _("Core API extensions are missing: %(missing_apis)s") diff --git a/nova/network/model.py b/nova/network/model.py index cf01d98cf..240911ea9 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -15,6 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. +import functools + +import eventlet import netaddr from nova import exception @@ -434,3 +437,75 @@ class NetworkInfo(list): network_info.append((network_dict, info_dict)) return network_info + + +class NetworkInfoAsyncWrapper(NetworkInfo): + """Wrapper around NetworkInfo that allows retrieving NetworkInfo + in an async manner. + + This allows one to start querying for network information before + you know you will need it. If you have a long-running + operation, this allows the network model retrieval to occur in the + background. When you need the data, it will ensure the async + operation has completed. 
+ + As an example: + + def allocate_net_info(arg1, arg2) + return call_quantum_to_allocate(arg1, arg2) + + network_info = NetworkInfoAsyncWrapper(allocate_net_info, arg1, arg2) + [do a long running operation -- real network_info will be retrieved + in the background] + [do something with network_info] + """ + + def __init__(self, async_method, *args, **kwargs): + self._gt = eventlet.spawn(async_method, *args, **kwargs) + methods = ['json', 'legacy', 'fixed_ips', 'floating_ips'] + for method in methods: + fn = getattr(self, method) + wrapper = functools.partial(self._sync_wrapper, fn) + functools.update_wrapper(wrapper, fn) + setattr(self, method, wrapper) + + def _sync_wrapper(self, wrapped, *args, **kwargs): + """Synchronize the model before running a method.""" + self.wait() + return wrapped(*args, **kwargs) + + def __getitem__(self, *args, **kwargs): + fn = super(NetworkInfoAsyncWrapper, self).__getitem__ + return self._sync_wrapper(fn, *args, **kwargs) + + def __iter__(self, *args, **kwargs): + fn = super(NetworkInfoAsyncWrapper, self).__iter__ + return self._sync_wrapper(fn, *args, **kwargs) + + def __len__(self, *args, **kwargs): + fn = super(NetworkInfoAsyncWrapper, self).__len__ + return self._sync_wrapper(fn, *args, **kwargs) + + def __str__(self, *args, **kwargs): + fn = super(NetworkInfoAsyncWrapper, self).__str__ + return self._sync_wrapper(fn, *args, **kwargs) + + def __repr__(self, *args, **kwargs): + fn = super(NetworkInfoAsyncWrapper, self).__repr__ + return self._sync_wrapper(fn, *args, **kwargs) + + def wait(self, do_raise=True): + """Wait for async call to finish.""" + if self._gt is not None: + try: + # NOTE(comstud): This looks funky, but this object is + # subclassed from list. In other words, 'self' is really + # just a list with a bunch of extra methods. So this + # line just replaces the current list (which should be + # empty) with the result. 
+ self[:] = self._gt.wait() + except Exception: + if do_raise: + raise + finally: + self._gt = None diff --git a/nova/openstack/common/config/__init__.py b/nova/openstack/common/config/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/nova/openstack/common/config/__init__.py diff --git a/tools/conf/extract_opts.py b/nova/openstack/common/config/generator.py index deb49f7a9..7ab3abc70 100644..100755 --- a/tools/conf/extract_opts.py +++ b/nova/openstack/common/config/generator.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 SINA Corporation @@ -17,7 +18,6 @@ # # @author: Zhongyue Luo, SINA Corporation. # - """Extracts OpenStack config option info from module(s).""" import imp @@ -29,8 +29,10 @@ import textwrap from oslo.config import cfg +from nova.openstack.common import gettextutils from nova.openstack.common import importutils +gettextutils.install('nova') STROPT = "StrOpt" BOOLOPT = "BoolOpt" @@ -58,7 +60,7 @@ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")) WORDWRAP_WIDTH = 60 -def main(srcfiles): +def generate(srcfiles): mods_by_pkg = dict() for filepath in srcfiles: pkg_name = filepath.split(os.sep)[1] @@ -104,58 +106,37 @@ def _import_module(mod_str): return sys.modules[mod_str[4:]] else: return importutils.import_module(mod_str) - except (ValueError, AttributeError) as err: - return None except ImportError as ie: sys.stderr.write("%s\n" % str(ie)) return None - except Exception as e: + except Exception: return None -def _guess_groups(opt, mod_obj): - groups = [] +def _is_in_group(opt, group): + "Check if opt is in group." + for key, value in group._opts.items(): + if value['opt'] == opt: + return True + return False + +def _guess_groups(opt, mod_obj): # is it in the DEFAULT group? 
- if (opt.dest in cfg.CONF and - not isinstance(cfg.CONF[opt.dest], cfg.CONF.GroupAttr)): - groups.append('DEFAULT') + if _is_in_group(opt, cfg.CONF): + return 'DEFAULT' # what other groups is it in? for key, value in cfg.CONF.items(): - if not isinstance(value, cfg.CONF.GroupAttr): - continue - if opt.dest not in value: - continue - groups.append(key) - - if len(groups) == 1: - return groups[0] - - group = None - for g in groups: - if g in mod_obj.__name__: - group = g - break - - if group is None and 'DEFAULT' in groups: - sys.stderr.write("Guessing that " + opt.dest + - " in " + mod_obj.__name__ + - " is in DEFAULT group out of " + - ','.join(groups) + "\n") - return 'DEFAULT' - - if group is None: - sys.stderr.write("Unable to guess what group " + opt.dest + - " in " + mod_obj.__name__ + - " is in out of " + ','.join(groups) + "\n") - sys.exit(1) + if isinstance(value, cfg.CONF.GroupAttr): + if _is_in_group(opt, value._group): + return value._group.name - sys.stderr.write("Guessing that " + opt.dest + - " in " + mod_obj.__name__ + - " is in the " + group + - " group out of " + ','.join(groups) + "\n") - return group + raise RuntimeError( + "Unable to find group for option %s, " + "maybe it's defined twice in the same group?" 
+ % opt.name + ) def _list_opts(obj): @@ -262,8 +243,11 @@ def _print_opt(opt): sys.exit(1) -if __name__ == '__main__': +def main(): if len(sys.argv) < 2: - print "usage: python %s [srcfile]...\n" % sys.argv[0] + print "usage: %s [srcfile]...\n" % sys.argv[0] sys.exit(0) - main(sys.argv[1:]) + generate(sys.argv[1:]) + +if __name__ == '__main__': + main() diff --git a/nova/openstack/common/db/sqlalchemy/session.py b/nova/openstack/common/db/sqlalchemy/session.py index a3f4283d0..093d8b082 100644 --- a/nova/openstack/common/db/sqlalchemy/session.py +++ b/nova/openstack/common/db/sqlalchemy/session.py @@ -386,14 +386,15 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name): """ def get_columns_from_uniq_cons_or_name(columns): - # note(boris-42): UniqueConstraint name convention: "uniq_c1_x_c2_x_c3" - # means that columns c1, c2, c3 are in UniqueConstraint. + # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2" + # where `t` it is table name, `0` it is delimiter and + # columns `c1`, `c2` are in UniqueConstraint. 
uniqbase = "uniq_" if not columns.startswith(uniqbase): if engine_name == "postgresql": return [columns[columns.index("_") + 1:columns.rindex("_")]] return [columns] - return columns[len(uniqbase):].split("_x_") + return columns[len(uniqbase):].split("0")[1:] if engine_name not in ["mysql", "sqlite", "postgresql"]: return diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py index 6e9688d81..0f8f3c45a 100644 --- a/nova/scheduler/chance.py +++ b/nova/scheduler/chance.py @@ -60,9 +60,12 @@ class ChanceScheduler(driver.Scheduler): def select_hosts(self, context, request_spec, filter_properties): """Selects a set of random hosts.""" - return [self._schedule(context, CONF.compute_topic, + hosts = [self._schedule(context, CONF.compute_topic, request_spec, filter_properties) for instance_uuid in request_spec.get('instance_uuids', [])] + if not hosts: + raise exception.NoValidHost(reason="") + return hosts def schedule_run_instance(self, context, request_spec, admin_password, injected_files, diff --git a/nova/storage/linuxscsi.py b/nova/storage/linuxscsi.py index afd8aa81c..d8c8b7f50 100644 --- a/nova/storage/linuxscsi.py +++ b/nova/storage/linuxscsi.py @@ -112,8 +112,11 @@ def find_multipath_device(device): # on /etc/multipath.conf settings. if info[1][:2] == "dm": mdev = "/dev/%s" % info[1] + mdev_id = info[0] elif info[2][:2] == "dm": mdev = "/dev/%s" % info[2] + mdev_id = info[1].replace('(', '') + mdev_id = mdev_id.replace(')', '') if mdev is None: LOG.warn(_("Couldn't find multipath device %(line)s") @@ -139,6 +142,7 @@ def find_multipath_device(device): if mdev is not None: info = {"device": mdev, + "id": mdev_id, "devices": devices} return info return None diff --git a/nova/test.py b/nova/test.py index f0dba7c7c..d7502b4ea 100644 --- a/nova/test.py +++ b/nova/test.py @@ -189,7 +189,12 @@ class TestingException(Exception): class TestCase(testtools.TestCase): - """Test case base class for all unit tests.""" + """Test case base class for all unit tests. 
+ + Due to the slowness of DB access, please consider deriving from + `NoDBTestCase` first. + """ + USES_DB = True def setUp(self): """Run before each test method to initialize test environment.""" @@ -217,13 +222,15 @@ class TestCase(testtools.TestCase): self.log_fixture = self.useFixture(fixtures.FakeLogger()) self.useFixture(conf_fixture.ConfFixture(CONF)) - global _DB_CACHE - if not _DB_CACHE: - _DB_CACHE = Database(session, migration, - sql_connection=CONF.sql_connection, - sqlite_db=CONF.sqlite_db, - sqlite_clean_db=CONF.sqlite_clean_db) - self.useFixture(_DB_CACHE) + if self.USES_DB: + global _DB_CACHE + if not _DB_CACHE: + _DB_CACHE = Database(session, migration, + sql_connection=CONF.sql_connection, + sqlite_db=CONF.sqlite_db, + sqlite_clean_db=CONF.sqlite_clean_db) + + self.useFixture(_DB_CACHE) mox_fixture = self.useFixture(MoxStubout()) self.mox = mox_fixture.mox @@ -274,3 +281,12 @@ class TimeOverride(fixtures.Fixture): super(TimeOverride, self).setUp() timeutils.set_time_override() self.addCleanup(timeutils.clear_time_override) + + +class NoDBTestCase(TestCase): + """ + `NoDBTestCase` differs from TestCase in that DB access is not supported. + This makes tests run significantly faster. If possible, all new tests + should derive from this class. 
+ """ + USES_DB = False diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 22f9c2d81..8a10712cb 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -2119,9 +2119,10 @@ class CloudTestCase(test.TestCase): def fake_block_device_mapping_get_all_by_instance(context, inst_id): return [dict(id=1, + source_type='snapshot', + destination_type='volume', snapshot_id=snapshots[0], volume_id=volumes[0], - virtual_name=None, volume_size=1, device_name='sda1', delete_on_termination=False, @@ -2210,45 +2211,54 @@ class CloudTestCase(test.TestCase): @staticmethod def _fake_bdm_get(ctxt, id): return [{'volume_id': 87654321, + 'source_type': 'volume', + 'destination_type': 'volume', 'snapshot_id': None, 'no_device': None, - 'virtual_name': None, 'delete_on_termination': True, 'device_name': '/dev/sdh'}, {'volume_id': None, 'snapshot_id': 98765432, + 'source_type': 'snapshot', + 'destination_type': 'volume', 'no_device': None, - 'virtual_name': None, 'delete_on_termination': True, 'device_name': '/dev/sdi'}, {'volume_id': None, 'snapshot_id': None, 'no_device': True, - 'virtual_name': None, 'delete_on_termination': None, 'device_name': None}, {'volume_id': None, 'snapshot_id': None, 'no_device': None, - 'virtual_name': 'ephemeral0', + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': None, 'delete_on_termination': None, 'device_name': '/dev/sdb'}, {'volume_id': None, 'snapshot_id': None, 'no_device': None, - 'virtual_name': 'swap', + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': 'swap', 'delete_on_termination': None, 'device_name': '/dev/sdc'}, {'volume_id': None, 'snapshot_id': None, 'no_device': None, - 'virtual_name': 'ephemeral1', + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': None, 'delete_on_termination': None, 'device_name': '/dev/sdd'}, {'volume_id': None, 'snapshot_id': None, 'no_device': None, - 'virtual_name': 'ephemeral2', + 
'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': None, 'delete_on_termination': None, 'device_name': '/dev/sd3'}, ] diff --git a/nova/tests/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/api/openstack/compute/contrib/test_keypairs.py index e338cad69..56b9fe84b 100644 --- a/nova/tests/api/openstack/compute/contrib/test_keypairs.py +++ b/nova/tests/api/openstack/compute/contrib/test_keypairs.py @@ -100,8 +100,12 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 400) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + 'Keypair name must be between 1 and 255 characters long', + res_dict['badRequest']['message']) - def test_keypair_create_with_invalid_name(self): + def test_keypair_create_with_name_too_long(self): body = { 'keypair': { 'name': 'a' * 256 @@ -113,6 +117,10 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 400) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + 'Keypair name must be between 1 and 255 characters long', + res_dict['badRequest']['message']) def test_keypair_create_with_non_alphanumeric_name(self): body = { @@ -127,6 +135,10 @@ class KeypairsTest(test.TestCase): res = req.get_response(self.app) res_dict = jsonutils.loads(res.body) self.assertEqual(res.status_int, 400) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + "Keypair name contains unsafe characters", + res_dict['badRequest']['message']) def test_keypair_import(self): body = { @@ -183,6 +195,10 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 413) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + "Quota exceeded, too many key pairs.", + res_dict['overLimit']['message']) def 
test_keypair_create_quota_limit(self): @@ -203,6 +219,10 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 413) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + "Quota exceeded, too many key pairs.", + res_dict['overLimit']['message']) def test_keypair_create_duplicate(self): self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate) @@ -213,6 +233,10 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 409) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + "Key pair 'create_duplicate' already exists.", + res_dict['conflictingRequest']['message']) def test_keypair_import_bad_key(self): body = { @@ -229,6 +253,10 @@ class KeypairsTest(test.TestCase): res = req.get_response(self.app) self.assertEqual(res.status_int, 400) + res_dict = jsonutils.loads(res.body) + self.assertEqual("Keypair data is invalid", + res_dict['badRequest']['message']) + def test_keypair_delete(self): req = webob.Request.blank('/v2/fake/os-keypairs/FAKE') req.method = 'DELETE' @@ -305,7 +333,7 @@ class KeypairsTest(test.TestCase): self.assertTrue('key_name' in server_dict) self.assertEquals(server_dict['key_name'], '') - def test_keypair_create_with_invalid_keypairBody(self): + def test_keypair_create_with_invalid_keypair_body(self): body = {'alpha': {'name': 'create_test'}} req = webob.Request.blank('/v1.1/fake/os-keypairs') req.method = 'POST' diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py b/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py index 529c5eb71..bb74fdafc 100644 --- a/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py +++ b/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py @@ -102,8 +102,12 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = 
req.get_response(self.app) self.assertEqual(res.status_int, 400) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + 'Keypair name must be between 1 and 255 characters long', + res_dict['badRequest']['message']) - def test_keypair_create_with_invalid_name(self): + def test_keypair_create_with_name_too_long(self): body = { 'keypair': { 'name': 'a' * 256 @@ -115,6 +119,10 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 400) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + 'Keypair name must be between 1 and 255 characters long', + res_dict['badRequest']['message']) def test_keypair_create_with_non_alphanumeric_name(self): body = { @@ -129,6 +137,10 @@ class KeypairsTest(test.TestCase): res = req.get_response(self.app) res_dict = jsonutils.loads(res.body) self.assertEqual(res.status_int, 400) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + "Keypair name contains unsafe characters", + res_dict['badRequest']['message']) def test_keypair_import(self): body = { @@ -185,6 +197,10 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 413) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + "Quota exceeded, too many key pairs.", + res_dict['overLimit']['message']) def test_keypair_create_quota_limit(self): @@ -205,6 +221,10 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 413) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + "Quota exceeded, too many key pairs.", + res_dict['overLimit']['message']) def test_keypair_create_duplicate(self): self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate) @@ -215,6 +235,10 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = 
req.get_response(self.app) self.assertEqual(res.status_int, 409) + res_dict = jsonutils.loads(res.body) + self.assertEqual( + "Key pair 'create_duplicate' already exists.", + res_dict['conflictingRequest']['message']) def test_keypair_import_bad_key(self): body = { @@ -230,6 +254,9 @@ class KeypairsTest(test.TestCase): req.headers['Content-Type'] = 'application/json' res = req.get_response(self.app) self.assertEqual(res.status_int, 400) + res_dict = jsonutils.loads(res.body) + self.assertEqual("Keypair data is invalid", + res_dict['badRequest']['message']) def test_keypair_delete(self): req = webob.Request.blank('/v3/os-keypairs/FAKE') @@ -307,7 +334,7 @@ class KeypairsTest(test.TestCase): self.assertTrue('key_name' in server_dict) self.assertEquals(server_dict['key_name'], '') - def test_keypair_create_with_invalid_keypairBody(self): + def test_keypair_create_with_invalid_keypair_body(self): body = {'alpha': {'name': 'create_test'}} req = webob.Request.blank('/v3/os-keypairs') req.method = 'POST' diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 473d3a253..f1defe039 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -843,7 +843,8 @@ class ServerActionsControllerTest(test.TestCase): def fake_block_device_mapping_get_all_by_instance(context, inst_id): return [dict(volume_id=_fake_id('a'), - virtual_name=None, + source_type='snapshot', + destination_type='volume', volume_size=1, device_name='vda', snapshot_id=1, diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index 6cdad1294..97a1a5826 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -24,6 +24,7 @@ import uuid import iso8601 from lxml import etree +import mox from oslo.config import cfg import webob @@ -1781,6 +1782,8 @@ 
class ServersControllerCreateTest(test.TestCase): self.ext_mgr.extensions = {} self.controller = servers.Controller(self.ext_mgr) + self.volume_id = 'fake' + def instance_create(context, inst): inst_type = flavors.get_instance_type_by_flavor_id(3) image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' @@ -1804,6 +1807,7 @@ class ServersControllerCreateTest(test.TestCase): "fixed_ips": [], "task_state": "", "vm_state": "", + "root_device_name": inst.get('root_device_name', 'vda'), } self.instance_cache_by_id[instance['id']] = instance @@ -2408,7 +2412,7 @@ class ServersControllerCreateTest(test.TestCase): def test_create_instance_with_volumes_enabled(self): self.ext_mgr.extensions = {'os-volumes': 'fake'} - bdm = [{'device_name': 'foo'}] + bdm = [{'device_name': 'foo', 'volume_id': 'fake_vol'}] params = {'block_device_mapping': bdm} old_create = compute_api.API.create @@ -2416,7 +2420,11 @@ class ServersControllerCreateTest(test.TestCase): self.assertEqual(kwargs['block_device_mapping'], bdm) return old_create(*args, **kwargs) + def _validate_bdm(*args, **kwargs): + pass + self.stubs.Set(compute_api.API, 'create', create) + self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm) self._test_create_extra(params) def test_create_instance_with_volumes_enabled_no_image(self): @@ -2441,7 +2449,25 @@ class ServersControllerCreateTest(test.TestCase): os-volumes extension is enabled and bdms are supplied """ self.ext_mgr.extensions = {'os-volumes': 'fake'} - bdm = [{'device_name': 'foo'}] + self.mox.StubOutWithMock(compute_api.API, '_validate_bdm') + self.mox.StubOutWithMock(compute_api.API, '_get_volume') + bdm = [{ + 'id': 1, + 'no_device': None, + 'virtual_name': None, + 'snapshot_id': None, + 'volume_id': self.volume_id, + 'status': 'active', + 'device_name': 'vda', + 'delete_on_termination': False, + 'volume_image_metadata': + {'test_key': 'test_value'} + }] + volume = bdm[0] + compute_api.API._validate_bdm(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(True) + 
compute_api.API._get_volume(mox.IgnoreArg(), + bdm).AndReturn(volume) params = {'block_device_mapping': bdm} old_create = compute_api.API.create @@ -2450,7 +2476,11 @@ class ServersControllerCreateTest(test.TestCase): self.assertNotIn('imageRef', kwargs) return old_create(*args, **kwargs) + def _validate_bdm(*args, **kwargs): + pass + self.stubs.Set(compute_api.API, 'create', create) + self.mox.ReplayAll() self._test_create_extra(params, no_image=True) def test_create_instance_with_volumes_disabled(self): @@ -2535,17 +2565,27 @@ class ServersControllerCreateTest(test.TestCase): def test_create_instance_with_bdm_delete_on_termination(self): self.ext_mgr.extensions = {'os-volumes': 'fake'} - bdm = [{'device_name': 'foo1', 'delete_on_termination': 1}, - {'device_name': 'foo2', 'delete_on_termination': True}, - {'device_name': 'foo3', 'delete_on_termination': 'invalid'}, - {'device_name': 'foo4', 'delete_on_termination': 0}, - {'device_name': 'foo5', 'delete_on_termination': False}] + bdm = [{'device_name': 'foo1', 'volume_id': 'fake_vol', + 'delete_on_termination': 1}, + {'device_name': 'foo2', 'volume_id': 'fake_vol', + 'delete_on_termination': True}, + {'device_name': 'foo3', 'volume_id': 'fake_vol', + 'delete_on_termination': 'invalid'}, + {'device_name': 'foo4', 'volume_id': 'fake_vol', + 'delete_on_termination': 0}, + {'device_name': 'foo5', 'volume_id': 'fake_vol', + 'delete_on_termination': False}] expected_bdm = [ - {'device_name': 'foo1', 'delete_on_termination': True}, - {'device_name': 'foo2', 'delete_on_termination': True}, - {'device_name': 'foo3', 'delete_on_termination': False}, - {'device_name': 'foo4', 'delete_on_termination': False}, - {'device_name': 'foo5', 'delete_on_termination': False}] + {'device_name': 'foo1', 'volume_id': 'fake_vol', + 'delete_on_termination': True}, + {'device_name': 'foo2', 'volume_id': 'fake_vol', + 'delete_on_termination': True}, + {'device_name': 'foo3', 'volume_id': 'fake_vol', + 'delete_on_termination': False}, + 
{'device_name': 'foo4', 'volume_id': 'fake_vol', + 'delete_on_termination': False}, + {'device_name': 'foo5', 'volume_id': 'fake_vol', + 'delete_on_termination': False}] params = {'block_device_mapping': bdm} old_create = compute_api.API.create @@ -2553,7 +2593,11 @@ class ServersControllerCreateTest(test.TestCase): self.assertEqual(expected_bdm, kwargs['block_device_mapping']) return old_create(*args, **kwargs) + def _validate_bdm(*args, **kwargs): + pass + self.stubs.Set(compute_api.API, 'create', create) + self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm) self._test_create_extra(params) def test_create_instance_with_user_data_enabled(self): diff --git a/nova/tests/api/openstack/compute/test_v3_extensions.py b/nova/tests/api/openstack/compute/test_v3_extensions.py index f7c1bf39c..97429ca45 100644 --- a/nova/tests/api/openstack/compute/test_v3_extensions.py +++ b/nova/tests/api/openstack/compute/test_v3_extensions.py @@ -15,9 +15,12 @@ # under the License. from oslo.config import cfg +import stevedore +from nova.api import openstack from nova.api.openstack import compute from nova.api.openstack.compute import plugins +from nova import exception from nova import test CONF = cfg.CONF @@ -28,8 +31,35 @@ class fake_bad_extension(object): alias = "fake-bad" +class fake_stevedore_enabled_extensions(object): + def __init__(self, namespace, check_func, invoke_on_load=False, + invoke_args=(), invoke_kwds={}): + self.extensions = [] + + def map(self, func, *args, **kwds): + pass + + def __iter__(self): + return iter(self.extensions) + + +class fake_loaded_extension_info(object): + def __init__(self): + self.extensions = {} + + def register_extension(self, ext): + self.extensions[ext] = ext + return True + + def get_extensions(self): + return {'core1': None, 'core2': None, 'noncore1': None} + + class ExtensionLoadingTestCase(test.TestCase): + def _set_v3_core(self, core_extensions): + openstack.API_V3_CORE_EXTENSIONS = core_extensions + def 
test_extensions_loaded(self): app = compute.APIRouterV3() self.assertIn('servers', app._loaded_extension_info.extensions) @@ -70,3 +100,42 @@ class ExtensionLoadingTestCase(test.TestCase): self.assertNotIn('os-fixed-ips', app._loaded_extension_info.extensions) self.assertIn('servers', app._loaded_extension_info.extensions) self.assertEqual(len(app._loaded_extension_info.extensions), 1) + + def test_get_missing_core_extensions(self): + v3_core = openstack.API_V3_CORE_EXTENSIONS + openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2']) + self.addCleanup(self._set_v3_core, v3_core) + self.assertEqual(len(compute.APIRouterV3.get_missing_core_extensions( + ['core1', 'core2', 'noncore1'])), 0) + missing_core = compute.APIRouterV3.get_missing_core_extensions( + ['core1']) + self.assertEqual(len(missing_core), 1) + self.assertIn('core2', missing_core) + missing_core = compute.APIRouterV3.get_missing_core_extensions([]) + self.assertEqual(len(missing_core), 2) + self.assertIn('core1', missing_core) + self.assertIn('core2', missing_core) + missing_core = compute.APIRouterV3.get_missing_core_extensions( + ['noncore1']) + self.assertEqual(len(missing_core), 2) + self.assertIn('core1', missing_core) + self.assertIn('core2', missing_core) + + def test_core_extensions_present(self): + self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager', + fake_stevedore_enabled_extensions) + self.stubs.Set(plugins, 'LoadedExtensionInfo', + fake_loaded_extension_info) + v3_core = openstack.API_V3_CORE_EXTENSIONS + openstack.API_V3_CORE_EXTENSIONS = set(['core1', 'core2']) + self.addCleanup(self._set_v3_core, v3_core) + # if no core API extensions are missing then an exception will + # not be raised when creating an instance of compute.APIRouterV3 + _ = compute.APIRouterV3() + + def test_core_extensions_missing(self): + self.stubs.Set(stevedore.enabled, 'EnabledExtensionManager', + fake_stevedore_enabled_extensions) + self.stubs.Set(plugins, 'LoadedExtensionInfo', + 
fake_loaded_extension_info) + self.assertRaises(exception.CoreAPIMissing, compute.APIRouterV3) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 91a8c9c95..4dea52dfb 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -31,6 +31,7 @@ import mox from oslo.config import cfg import nova +from nova import block_device from nova import compute from nova.compute import api as compute_api from nova.compute import flavors @@ -378,6 +379,27 @@ class ComputeVolumeTestCase(BaseTestCase): block_device_mapping) self.assertEqual(self.cinfo.get('serial'), self.volume_id) + def test_boot_volume_metadata(self): + self.mox.StubOutWithMock(self.compute_api.volume_api, 'get') + block_device_mapping = [{ + 'id': 1, + 'no_device': None, + 'virtual_name': None, + 'snapshot_id': None, + 'volume_id': self.volume_id, + 'status': 'active', + 'device_name': 'vda', + 'delete_on_termination': False, + 'volume_image_metadata': + {'test_key': 'test_value'} + }] + sentinel = object() + self.compute_api.volume_api.get(self.context, + self.volume_id).AndReturn(sentinel) + self.mox.ReplayAll() + vol = self.compute_api._get_volume(self.context, block_device_mapping) + self.assertIs(vol, sentinel) + def test_poll_volume_usage_disabled(self): ctxt = 'MockContext' self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms') @@ -526,7 +548,8 @@ class ComputeVolumeTestCase(BaseTestCase): block_device_mapping = [{ 'id': 1, 'no_device': None, - 'virtual_name': None, + 'source_type': 'volume', + 'destination_type': 'volume', 'snapshot_id': None, 'volume_id': self.volume_id, 'device_name': 'vda', @@ -2337,6 +2360,36 @@ class ComputeTestCase(BaseTestCase): instance=jsonutils.to_primitive(instance), bdms={}) + def test_delete_instance_keeps_net_on_power_off_fail(self): + self.mox.StubOutWithMock(self.compute.driver, 'destroy') + self.mox.StubOutWithMock(self.compute, '_deallocate_network') + exp = 
exception.InstancePowerOffFailure(reason='') + self.compute.driver.destroy(mox.IgnoreArg(), mox.IgnoreArg(), + mox.IgnoreArg()).AndRaise(exp) + # mox will detect if _deallocate_network gets called unexpectedly + self.mox.ReplayAll() + instance = self._create_fake_instance() + self.assertRaises(exception.InstancePowerOffFailure, + self.compute._delete_instance, + self.context, + instance=jsonutils.to_primitive(instance), + bdms={}) + + def test_delete_instance_loses_net_on_other_fail(self): + self.mox.StubOutWithMock(self.compute.driver, 'destroy') + self.mox.StubOutWithMock(self.compute, '_deallocate_network') + exp = test.TestingException() + self.compute.driver.destroy(mox.IgnoreArg(), mox.IgnoreArg(), + mox.IgnoreArg()).AndRaise(exp) + self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg()) + self.mox.ReplayAll() + instance = self._create_fake_instance() + self.assertRaises(test.TestingException, + self.compute._delete_instance, + self.context, + instance=jsonutils.to_primitive(instance), + bdms={}) + def test_delete_instance_deletes_console_auth_tokens(self): instance = self._create_fake_instance() self.flags(vnc_enabled=True) @@ -6094,6 +6147,8 @@ class ComputeAPITestCase(BaseTestCase): def fake_get_instance_bdms(*args, **kwargs): return [{'device_name': '/dev/vda', + 'source_type': 'volume', + 'destination_type': 'volume', 'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'}] self.stubs.Set(self.compute_api, 'get_instance_bdms', @@ -7146,12 +7201,14 @@ class ComputeAPITestCase(BaseTestCase): self.context, instance_type, instance['uuid'], mappings) bdms = [self._parse_db_block_device_mapping(bdm_ref) - for bdm_ref in db.block_device_mapping_get_all_by_instance( - self.context, instance['uuid'])] + for bdm_ref in block_device.legacy_mapping( + db.block_device_mapping_get_all_by_instance( + self.context, instance['uuid']))] expected_result = [ {'virtual_name': 'swap', 'device_name': '/dev/sdb1', - 'volume_size': swap_size}, - {'virtual_name': 
'ephemeral0', 'device_name': '/dev/sdc1'}, + 'volume_size': swap_size, 'delete_on_termination': True}, + {'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1', + 'delete_on_termination': True}, # NOTE(yamahata): ATM only ephemeral0 is supported. # they're ignored for now @@ -7166,21 +7223,23 @@ class ComputeAPITestCase(BaseTestCase): self.context, flavors.get_default_instance_type(), instance['uuid'], block_device_mapping) bdms = [self._parse_db_block_device_mapping(bdm_ref) - for bdm_ref in db.block_device_mapping_get_all_by_instance( - self.context, instance['uuid'])] + for bdm_ref in block_device.legacy_mapping( + db.block_device_mapping_get_all_by_instance( + self.context, instance['uuid']))] expected_result = [ {'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000', 'device_name': '/dev/sda1'}, {'virtual_name': 'swap', 'device_name': '/dev/sdb1', - 'volume_size': swap_size}, + 'volume_size': swap_size, 'delete_on_termination': True}, {'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111', 'device_name': '/dev/sdb2'}, {'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222', 'device_name': '/dev/sdb3'}, {'no_device': True, 'device_name': '/dev/sdb4'}, - {'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'}, + {'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1', + 'delete_on_termination': True}, {'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333', 'device_name': '/dev/sdc2'}, {'snapshot_id': '44444444-aaaa-bbbb-cccc-444444444444', @@ -7204,6 +7263,56 @@ class ComputeAPITestCase(BaseTestCase): instance = db.instance_get_by_uuid(self.context, instance['uuid']) self.compute.terminate_instance(self.context, instance) + def test_populate_instance_for_bdm(self): + # Test that the image bdm is created + instance_type = {'swap': 1} + instance = self._create_fake_instance( + {'root_device_name': 'vda'} + ) + image = {'uuid': FAKE_IMAGE_REF} + fake_bdms = [{'device_name': '/dev/vda', + 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333', + 
'delete_on_termination': False}] + + # Has an image but no bdms + self.compute_api._populate_instance_for_bdm(self.context, + instance, + instance_type, + image, []) + bdms = db.block_device_mapping_get_all_by_instance( + self.context, instance['uuid']) + self.assertEqual(len(bdms), 1) + self.assertEqual(bdms[0]['image_id'], FAKE_IMAGE_REF) + for bdm in bdms: + db.block_device_mapping_destroy(self.context, bdm['id']) + + # Has an image and is volume backed - legacy style + self.compute_api._populate_instance_for_bdm(self.context, + instance, + instance_type, + image, fake_bdms) + bdms = db.block_device_mapping_get_all_by_instance( + self.context, instance['uuid']) + self.assertEqual(len(bdms), 1) + self.assertEqual(bdms[0]['snapshot_id'], + '33333333-aaaa-bbbb-cccc-333333333333') + for bdm in bdms: + db.block_device_mapping_destroy(self.context, bdm['id']) + + # Is volume backed and has no image + instance['image_ref'] = '' + self.compute_api._populate_instance_for_bdm(self.context, + instance, + instance_type, + image, fake_bdms) + bdms = db.block_device_mapping_get_all_by_instance( + self.context, instance['uuid']) + self.assertEqual(len(bdms), 1) + self.assertEqual(bdms[0]['snapshot_id'], + '33333333-aaaa-bbbb-cccc-333333333333') + for bdm in bdms: + db.block_device_mapping_destroy(self.context, bdm['id']) + def test_volume_size(self): ephemeral_size = 2 swap_size = 3 @@ -7368,6 +7477,8 @@ class ComputeAPITestCase(BaseTestCase): def fake_get_instance_bdms(*args, **kwargs): return [{'device_name': '/dev/vda', + 'source_type': 'volume', + 'destination_type': 'volume', 'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'}] self.stubs.Set(self.compute_api, 'get_instance_bdms', @@ -8338,124 +8449,6 @@ class ComputePolicyTestCase(BaseTestCase): availability_zone='1:1') -class KeypairAPITestCase(BaseTestCase): - def setUp(self): - super(KeypairAPITestCase, self).setUp() - self.keypair_api = compute_api.KeypairAPI() - self.ctxt = context.RequestContext('fake', 'fake') - 
self._keypair_db_call_stubs() - self.existing_key_name = 'fake existing key name' - self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf' - '/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR' - 'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/' - 'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu' - 'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8' - 'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK' - 'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU' - 'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz') - self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a' - - def _keypair_db_call_stubs(self): - - def db_key_pair_get_all_by_user(self, user_id): - return [] - - def db_key_pair_create(self, keypair): - pass - - def db_key_pair_destroy(context, user_id, name): - pass - - def db_key_pair_get(context, user_id, name): - if name == self.existing_key_name: - return {'name': self.existing_key_name, - 'public_key': self.pub_key, - 'fingerprint': self.fingerprint} - else: - raise exception.KeypairNotFound(user_id=user_id, name=name) - - self.stubs.Set(db, "key_pair_get_all_by_user", - db_key_pair_get_all_by_user) - self.stubs.Set(db, "key_pair_create", - db_key_pair_create) - self.stubs.Set(db, "key_pair_destroy", - db_key_pair_destroy) - self.stubs.Set(db, "key_pair_get", - db_key_pair_get) - - def test_create_keypair(self): - keypair = self.keypair_api.create_key_pair(self.ctxt, - self.ctxt.user_id, 'foo') - self.assertEqual('foo', keypair['name']) - - def test_create_keypair_name_too_long(self): - self.assertRaises(exception.InvalidKeypair, - self.keypair_api.create_key_pair, - self.ctxt, self.ctxt.user_id, 'x' * 256) - - def test_create_keypair_invalid_chars(self): - self.assertRaises(exception.InvalidKeypair, - self.keypair_api.create_key_pair, - self.ctxt, self.ctxt.user_id, '* BAD CHARACTERS! 
*') - - def test_create_keypair_already_exists(self): - def db_key_pair_create_duplicate(context, keypair): - raise exception.KeyPairExists(key_name=keypair.get('name', '')) - self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate) - self.assertRaises(exception.KeyPairExists, - self.keypair_api.create_key_pair, - self.ctxt, self.ctxt.user_id, - self.existing_key_name) - - def test_create_keypair_quota_limit(self): - def fake_quotas_count(self, context, resource, *args, **kwargs): - return CONF.quota_key_pairs - self.stubs.Set(QUOTAS, "count", fake_quotas_count) - self.assertRaises(exception.KeypairLimitExceeded, - self.keypair_api.create_key_pair, - self.ctxt, self.ctxt.user_id, 'foo') - - def test_import_keypair(self): - keypair = self.keypair_api.import_key_pair(self.ctxt, - self.ctxt.user_id, - 'foo', - self.pub_key) - self.assertEqual('foo', keypair['name']) - self.assertEqual(self.fingerprint, keypair['fingerprint']) - self.assertEqual(self.pub_key, keypair['public_key']) - - def test_import_keypair_bad_public_key(self): - self.assertRaises(exception.InvalidKeypair, - self.keypair_api.import_key_pair, - self.ctxt, self.ctxt.user_id, 'foo', 'bad key data') - - def test_import_keypair_name_too_long(self): - self.assertRaises(exception.InvalidKeypair, - self.keypair_api.import_key_pair, - self.ctxt, self.ctxt.user_id, 'x' * 256, - self.pub_key) - - def test_import_keypair_invalid_chars(self): - self.assertRaises(exception.InvalidKeypair, - self.keypair_api.import_key_pair, - self.ctxt, self.ctxt.user_id, - '* BAD CHARACTERS! 
*', self.pub_key) - - def test_import_keypair_quota_limit(self): - def fake_quotas_count(self, context, resource, *args, **kwargs): - return CONF.quota_key_pairs - self.stubs.Set(QUOTAS, "count", fake_quotas_count) - self.assertRaises(exception.KeypairLimitExceeded, - self.keypair_api.import_key_pair, - self.ctxt, self.ctxt.user_id, 'foo', self.pub_key) - - def test_get_keypair(self): - keypair = self.keypair_api.get_key_pair(self.ctxt, - self.ctxt.user_id, - self.existing_key_name) - self.assertEqual(self.existing_key_name, keypair['name']) - - class DisabledInstanceTypesTestCase(BaseTestCase): """ Some instance-types are marked 'disabled' which means that they will not diff --git a/nova/tests/compute/test_keypairs.py b/nova/tests/compute/test_keypairs.py new file mode 100644 index 000000000..f82d69ccb --- /dev/null +++ b/nova/tests/compute/test_keypairs.py @@ -0,0 +1,174 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Tests for keypair API.""" + +from oslo.config import cfg + +from nova.compute import api as compute_api +from nova import context +from nova import db +from nova import exception +from nova import quota +from nova.tests.compute import test_compute + + +CONF = cfg.CONF +QUOTAS = quota.QUOTAS + + +class KeypairAPITestCase(test_compute.BaseTestCase): + def setUp(self): + super(KeypairAPITestCase, self).setUp() + self.keypair_api = compute_api.KeypairAPI() + self.ctxt = context.RequestContext('fake', 'fake') + self._keypair_db_call_stubs() + self.existing_key_name = 'fake existing key name' + self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf' + '/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR' + 'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/' + 'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu' + 'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8' + 'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK' + 'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU' + 'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz') + self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a' + + def _keypair_db_call_stubs(self): + + def db_key_pair_get_all_by_user(context, user_id): + return [{'name': self.existing_key_name, + 'public_key': self.pub_key, + 'fingerprint': self.fingerprint}] + + def db_key_pair_create(context, keypair): + pass + + def db_key_pair_destroy(context, user_id, name): + pass + + def db_key_pair_get(context, user_id, name): + if name == self.existing_key_name: + return {'name': self.existing_key_name, + 'public_key': self.pub_key, + 'fingerprint': self.fingerprint} + else: + raise exception.KeypairNotFound(user_id=user_id, name=name) + + self.stubs.Set(db, "key_pair_get_all_by_user", + db_key_pair_get_all_by_user) + self.stubs.Set(db, "key_pair_create", + db_key_pair_create) + self.stubs.Set(db, "key_pair_destroy", + db_key_pair_destroy) + self.stubs.Set(db, "key_pair_get", + db_key_pair_get) + + +class CreateImportSharedTestMixIn(object): + 
"""Tests shared between create and import_key. + + Mix-in pattern is used here so that these `test_*` methods aren't picked + up by the test runner unless they are part of a 'concrete' test case. + """ + + def assertKeyNameRaises(self, exc_class, expected_message, name): + func = getattr(self.keypair_api, self.func_name) + + args = [] + if self.func_name == 'import_key_pair': + args.append(self.pub_key) + + exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id, + name, *args) + self.assertEqual(expected_message, unicode(exc)) + + def test_name_too_short(self): + msg = _('Keypair name must be between 1 and 255 characters long') + self.assertKeyNameRaises(exception.InvalidKeypair, msg, '') + + def test_name_too_long(self): + msg = _('Keypair name must be between 1 and 255 characters long') + self.assertKeyNameRaises(exception.InvalidKeypair, msg, 'x' * 256) + + def test_invalid_chars(self): + msg = _("Keypair name contains unsafe characters") + self.assertKeyNameRaises(exception.InvalidKeypair, msg, + '* BAD CHARACTERS! 
*') + + def test_already_exists(self): + def db_key_pair_create_duplicate(context, keypair): + raise exception.KeyPairExists(key_name=keypair.get('name', '')) + + self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate) + + msg = (_("Key pair '%(key_name)s' already exists.") % + {'key_name': self.existing_key_name}) + self.assertKeyNameRaises(exception.KeyPairExists, msg, + self.existing_key_name) + + def test_quota_limit(self): + def fake_quotas_count(self, context, resource, *args, **kwargs): + return CONF.quota_key_pairs + + self.stubs.Set(QUOTAS, "count", fake_quotas_count) + + msg = _("Maximum number of key pairs exceeded") + self.assertKeyNameRaises(exception.KeypairLimitExceeded, msg, 'foo') + + +class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn): + func_name = 'create_key_pair' + + def test_success(self): + keypair = self.keypair_api.create_key_pair(self.ctxt, + self.ctxt.user_id, 'foo') + self.assertEqual('foo', keypair['name']) + + +class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn): + func_name = 'import_key_pair' + + def test_success(self): + keypair = self.keypair_api.import_key_pair(self.ctxt, + self.ctxt.user_id, + 'foo', + self.pub_key) + self.assertEqual('foo', keypair['name']) + self.assertEqual(self.fingerprint, keypair['fingerprint']) + self.assertEqual(self.pub_key, keypair['public_key']) + + def test_bad_key_data(self): + exc = self.assertRaises(exception.InvalidKeypair, + self.keypair_api.import_key_pair, + self.ctxt, self.ctxt.user_id, 'foo', + 'bad key data') + self.assertEqual(u'Keypair data is invalid', unicode(exc)) + + +class GetKeypairTestCase(KeypairAPITestCase): + def test_success(self): + keypair = self.keypair_api.get_key_pair(self.ctxt, + self.ctxt.user_id, + self.existing_key_name) + self.assertEqual(self.existing_key_name, keypair['name']) + + +class GetKeypairsTestCase(KeypairAPITestCase): + def test_success(self): + keypairs = 
self.keypair_api.get_key_pairs(self.ctxt, self.ctxt.user_id) + self.assertEqual([self.existing_key_name], + [k['name'] for k in keypairs]) diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index 8b397db02..e5abd1182 100644 --- a/nova/tests/conductor/test_conductor.py +++ b/nova/tests/conductor/test_conductor.py @@ -321,7 +321,7 @@ class _BaseTestCase(object): self.context, fake_inst['uuid']).AndReturn('fake-result') self.mox.ReplayAll() result = self.conductor.block_device_mapping_get_all_by_instance( - self.context, fake_inst) + self.context, fake_inst, legacy=False) self.assertEqual(result, 'fake-result') def test_instance_get_active_by_window_joined(self): diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py index efe243d1b..60811e65b 100644 --- a/nova/tests/db/test_db_api.py +++ b/nova/tests/db/test_db_api.py @@ -33,6 +33,7 @@ from sqlalchemy import MetaData from sqlalchemy.orm import query from sqlalchemy.sql.expression import select +from nova import block_device from nova.compute import vm_states from nova import context from nova import db @@ -1928,7 +1929,10 @@ class ModelsObjectComparatorMixin(object): obj1 = self._dict_from_object(obj1, ignored_keys) obj2 = self._dict_from_object(obj2, ignored_keys) - self.assertEqual(len(obj1), len(obj2)) + self.assertEqual(len(obj1), + len(obj2), + "Keys mismatch: %s" % + str(set(obj1.keys()) ^ set(obj2.keys()))) for key, value in obj1.iteritems(): self.assertEqual(value, obj2[key]) @@ -4001,8 +4005,11 @@ class BlockDeviceMappingTestCase(test.TestCase): def _create_bdm(self, values): values.setdefault('instance_uuid', self.instance['uuid']) values.setdefault('device_name', 'fake_device') - db.block_device_mapping_create(self.ctxt, values) - uuid = values['instance_uuid'] + values.setdefault('source_type', 'volume') + values.setdefault('destination_type', 'volume') + block_dev = block_device.BlockDeviceDict(values) + db.block_device_mapping_create(self.ctxt, 
block_dev, legacy=False) + uuid = block_dev['instance_uuid'] bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) @@ -4033,81 +4040,90 @@ class BlockDeviceMappingTestCase(test.TestCase): def test_block_device_mapping_update(self): bdm = self._create_bdm({}) db.block_device_mapping_update(self.ctxt, bdm['id'], - {'virtual_name': 'some_virt_name'}) + {'destination_type': 'moon'}, + legacy=False) uuid = bdm['instance_uuid'] bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) - self.assertEqual(bdm_real[0]['virtual_name'], 'some_virt_name') + self.assertEqual(bdm_real[0]['destination_type'], 'moon') def test_block_device_mapping_update_or_create(self): values = { 'instance_uuid': self.instance['uuid'], 'device_name': 'fake_name', - 'virtual_name': 'some_virt_name' + 'source_type': 'volume', + 'destination_type': 'volume' } # check create - db.block_device_mapping_update_or_create(self.ctxt, values) + db.block_device_mapping_update_or_create(self.ctxt, values, + legacy=False) uuid = values['instance_uuid'] bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) self.assertEqual(len(bdm_real), 1) self.assertEqual(bdm_real[0]['device_name'], 'fake_name') # check update - values['virtual_name'] = 'virtual_name' - db.block_device_mapping_update_or_create(self.ctxt, values) + values['destination_type'] = 'camelot' + db.block_device_mapping_update_or_create(self.ctxt, values, + legacy=False) bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) self.assertEqual(len(bdm_real), 1) bdm_real = bdm_real[0] self.assertEqual(bdm_real['device_name'], 'fake_name') - self.assertEqual(bdm_real['virtual_name'], 'virtual_name') + self.assertEqual(bdm_real['destination_type'], 'camelot') def test_block_device_mapping_update_or_create_check_remove_virt(self): uuid = self.instance['uuid'] values = { 'instance_uuid': uuid, - 'virtual_name': 'ephemeral12' + 'source_type': 'blank', + 'guest_format': 'swap', } - # check that 
old bdm with same virtual_names are deleted on create + # check that old swap bdms are deleted on create val1 = dict(values) val1['device_name'] = 'device1' - db.block_device_mapping_create(self.ctxt, val1) + db.block_device_mapping_create(self.ctxt, val1, legacy=False) val2 = dict(values) val2['device_name'] = 'device2' - db.block_device_mapping_update_or_create(self.ctxt, val2) + db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False) bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) self.assertEqual(len(bdm_real), 1) bdm_real = bdm_real[0] self.assertEqual(bdm_real['device_name'], 'device2') - self.assertEqual(bdm_real['virtual_name'], 'ephemeral12') + self.assertEqual(bdm_real['source_type'], 'blank') + self.assertEqual(bdm_real['guest_format'], 'swap') + db.block_device_mapping_destroy(self.ctxt, bdm_real['id']) - # check that old bdm with same virtual_names are deleted on update + # check that old ephemerals are deleted no matter what val3 = dict(values) val3['device_name'] = 'device3' - val3['virtual_name'] = 'some_name' - db.block_device_mapping_create(self.ctxt, val3) + val3['guest_format'] = None + val4 = dict(values) + val4['device_name'] = 'device4' + val4['guest_format'] = None + db.block_device_mapping_create(self.ctxt, val3, legacy=False) + db.block_device_mapping_create(self.ctxt, val4, legacy=False) bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) self.assertEqual(len(bdm_real), 2) - val3['virtual_name'] = 'ephemeral12' - db.block_device_mapping_update_or_create(self.ctxt, val3) + val5 = dict(values) + val5['device_name'] = 'device5' + val5['guest_format'] = None + db.block_device_mapping_update_or_create(self.ctxt, val5, legacy=False) bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) self.assertEqual(len(bdm_real), 1) bdm_real = bdm_real[0] - self.assertEqual(bdm_real['device_name'], 'device3') - self.assertEqual(bdm_real['virtual_name'], 'ephemeral12') + 
self.assertEqual(bdm_real['device_name'], 'device5') def test_block_device_mapping_get_all_by_instance(self): uuid1 = self.instance['uuid'] uuid2 = db.instance_create(self.ctxt, {})['uuid'] bmds_values = [{'instance_uuid': uuid1, - 'virtual_name': 'virtual_name', 'device_name': 'first'}, {'instance_uuid': uuid2, - 'virtual_name': 'virtual_name1', 'device_name': 'second'}, {'instance_uuid': uuid2, - 'virtual_name': 'virtual_name2', 'device_name': 'third'}] for bdm in bmds_values: @@ -4115,7 +4131,6 @@ class BlockDeviceMappingTestCase(test.TestCase): bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1) self.assertEqual(len(bmd), 1) - self.assertEqual(bmd[0]['virtual_name'], 'virtual_name') self.assertEqual(bmd[0]['device_name'], 'first') bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2) @@ -4273,8 +4288,8 @@ class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin): "i.nv.ali.ip") def test_virtual_interface_get_by_uuid(self): - vifs = [self._create_virt_interface({}), - self._create_virt_interface({})] + vifs = [self._create_virt_interface({"address": "address_1"}), + self._create_virt_interface({"address": "address_2"})] for vif in vifs: real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid']) self._assertEqualObjects(vif, real_vif) diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py index 973f5088c..0e89cd521 100644 --- a/nova/tests/db/test_migrations.py +++ b/nova/tests/db/test_migrations.py @@ -1416,6 +1416,172 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn): self.assertTrue(db_utils.check_shadow_table(engine, 'floating_ips')) self.assertTrue(db_utils.check_shadow_table(engine, 'console_pools')) + def _unique_constraint_check_migrate_185(self, engine, check=True): + """Test check unique constraint behavior. 
It should be the same before + and after migration because we changed their names only.""" + + data_list = [ + ("floating_ips", {'address': '10.12.14.16', 'deleted': 0}), + ("instance_info_caches", {'instance_uuid': 'm161-uuid1'}), + ('instance_type_projects', {'instance_type_id': 1, + 'project_id': '116', 'deleted': 0}), + ('instance_types', {'flavorid': "flavorid_12", 'deleted': 0, + 'memory_mb': 64, 'vcpus': 10, 'swap': 100}), + ('instance_types', {'name': "name_123", 'deleted': 0, + 'memory_mb': 128, 'vcpus': 11, 'swap': 300}), + ('key_pairs', {'user_id': 1, 'name': "name_qwer", 'deleted': 0}), + ('networks', {'vlan': '123', 'deleted': 0}), + ('task_log', {'task_name': 'task_123', 'host': 'localhost', + 'period_beginning': datetime.datetime(2013, 02, 11), + 'period_ending': datetime.datetime(2015, 01, 01), + 'state': 'state_1', 'message': 'msg_1'}), + ('virtual_interfaces', {'address': '192.168.0.0'}) + ] + + for table_name, data in data_list: + table = db_utils.get_table(engine, table_name) + if not check: + table.insert().values(data).execute() + else: + # we replace values for some columns because they don't + # belong to unique constraint + if table_name == "instance_types": + for key in ("memory_mb", "vcpus", "swap"): + data[key] = data[key] * 2 + if table_name == "task_log": + data["message"] = 'msg_2' + data["state"] = 'state_2' + + self.assertRaises(sqlalchemy.exc.IntegrityError, + table.insert().execute, data) + + def _pre_upgrade_185(self, engine): + self._unique_constraint_check_migrate_185(engine, False) + + def check_185(self, engine): + self._unique_constraint_check_migrate_185(engine) + + def _post_downgrade_185(self, engine): + self._unique_constraint_check_migrate_185(engine) + + def _pre_upgrade_186(self, engine): + fake_instances = [ + dict(uuid='mig186_uuid-1', image_ref='fake_image_1', + root_device_name='/dev/vda'), + dict(uuid='mig186_uuid-2', image_ref='', + root_device_name='vda'), + dict(uuid='mig186_uuid-3', image_ref='fake_image_2', 
+ root_device_name='/dev/vda'), + ] + + fake_bdms = [ + # Instance 1 - image, volume and swap + dict(instance_uuid='mig186_uuid-1', device_name='/dev/vdc', + volume_id='fake_volume_1'), + dict(instance_uuid='mig186_uuid-1', device_name='/dev/vdb', + virtual_name='swap'), + # Instance 2 - no image. snapshot and volume + dict(instance_uuid='mig186_uuid-2', device_name='/dev/vda', + snapshot_id='fake_snap_1', volume_id='fake_volume_2'), + dict(instance_uuid='mig186_uuid-2', device_name='/dev/vdc', + volume_id='fake_volume_3'), + # Instance 3 - ephemerals and swap + dict(instance_uuid='mig186_uuid-3', device_name='/dev/vdc', + virtual_name='ephemeral0'), + dict(instance_uuid='mig186_uuid-3', device_name='/dev/vdd', + virtual_name='ephemeral1'), + dict(instance_uuid='mig186_uuid-3', device_name='/dev/vdb', + virtual_name='swap'), + ] + + instances = db_utils.get_table(engine, 'instances') + block_device = db_utils.get_table(engine, 'block_device_mapping') + engine.execute(instances.insert(), fake_instances) + for fake_bdm in fake_bdms: + engine.execute(block_device.insert(), fake_bdm) + + return fake_instances, fake_bdms + + def _check_186(self, engine, data): + block_device = db_utils.get_table(engine, 'block_device_mapping') + + instance_qs = [] + + for instance in ('mig186_uuid-1', 'mig186_uuid-2', 'mig186_uuid-3'): + q = block_device.select().where( + block_device.c.instance_uuid == instance).order_by( + block_device.c.id.asc() + ) + instance_qs.append(q) + + bdm_1s, bdm_2s, bdm_3s = ( + [bdm for bdm in q.execute()] + for q in instance_qs + ) + + # Instance 1 + self.assertEqual(bdm_1s[0].source_type, 'volume') + self.assertEqual(bdm_1s[0].destination_type, 'volume') + self.assertEqual(bdm_1s[0].volume_id, 'fake_volume_1') + self.assertEqual(bdm_1s[0].device_type, 'disk') + self.assertEqual(bdm_1s[0].boot_index, -1) + self.assertEqual(bdm_1s[0].device_name, '/dev/vdc') + + self.assertEqual(bdm_1s[1].source_type, 'blank') + self.assertEqual(bdm_1s[1].guest_format, 
'swap') + self.assertEqual(bdm_1s[1].destination_type, 'local') + self.assertEqual(bdm_1s[1].device_type, 'disk') + self.assertEqual(bdm_1s[1].boot_index, -1) + self.assertEqual(bdm_1s[1].device_name, '/dev/vdb') + + self.assertEqual(bdm_1s[2].source_type, 'image') + self.assertEqual(bdm_1s[2].destination_type, 'local') + self.assertEqual(bdm_1s[2].device_type, 'disk') + self.assertEqual(bdm_1s[2].image_id, 'fake_image_1') + self.assertEqual(bdm_1s[2].boot_index, 0) + + # Instance 2 + self.assertEqual(bdm_2s[0].source_type, 'snapshot') + self.assertEqual(bdm_2s[0].destination_type, 'volume') + self.assertEqual(bdm_2s[0].snapshot_id, 'fake_snap_1') + self.assertEqual(bdm_2s[0].volume_id, 'fake_volume_2') + self.assertEqual(bdm_2s[0].device_type, 'disk') + self.assertEqual(bdm_2s[0].boot_index, 0) + self.assertEqual(bdm_2s[0].device_name, '/dev/vda') + + self.assertEqual(bdm_2s[1].source_type, 'volume') + self.assertEqual(bdm_2s[1].destination_type, 'volume') + self.assertEqual(bdm_2s[1].volume_id, 'fake_volume_3') + self.assertEqual(bdm_2s[1].device_type, 'disk') + self.assertEqual(bdm_2s[1].boot_index, -1) + self.assertEqual(bdm_2s[1].device_name, '/dev/vdc') + + # Instance 3 + self.assertEqual(bdm_3s[0].source_type, 'blank') + self.assertEqual(bdm_3s[0].destination_type, 'local') + self.assertEqual(bdm_3s[0].device_type, 'disk') + self.assertEqual(bdm_3s[0].boot_index, -1) + self.assertEqual(bdm_3s[0].device_name, '/dev/vdc') + + self.assertEqual(bdm_3s[1].source_type, 'blank') + self.assertEqual(bdm_3s[1].destination_type, 'local') + self.assertEqual(bdm_3s[1].device_type, 'disk') + self.assertEqual(bdm_3s[1].boot_index, -1) + self.assertEqual(bdm_3s[1].device_name, '/dev/vdd') + + self.assertEqual(bdm_3s[2].source_type, 'blank') + self.assertEqual(bdm_3s[2].guest_format, 'swap') + self.assertEqual(bdm_3s[2].destination_type, 'local') + self.assertEqual(bdm_3s[2].device_type, 'disk') + self.assertEqual(bdm_3s[2].boot_index, -1) + 
self.assertEqual(bdm_3s[2].device_name, '/dev/vdb') + + self.assertEqual(bdm_3s[3].source_type, 'image') + self.assertEqual(bdm_3s[3].destination_type, 'local') + self.assertEqual(bdm_3s[3].device_type, 'disk') + self.assertEqual(bdm_3s[3].image_id, 'fake_image_2') + self.assertEqual(bdm_3s[3].boot_index, 0) + class TestBaremetalMigrations(BaseMigrationTestCase, CommonTestsMixIn): """Test sqlalchemy-migrate migrations.""" diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 42ce71ded..79af362bb 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -406,8 +406,11 @@ def set_stub_network_methods(stubs): def fake_networkinfo(*args, **kwargs): return network_model.NetworkInfo() + def fake_async_networkinfo(*args, **kwargs): + return network_model.NetworkInfoAsyncWrapper(fake_networkinfo) + stubs.Set(cm, '_get_instance_nw_info', fake_networkinfo) - stubs.Set(cm, '_allocate_network', fake_networkinfo) + stubs.Set(cm, '_allocate_network', fake_async_networkinfo) stubs.Set(cm, '_deallocate_network', lambda *args, **kwargs: None) diff --git a/nova/tests/network/test_network_info.py b/nova/tests/network/test_network_info.py index 56522e6a5..bb3d91f55 100644 --- a/nova/tests/network/test_network_info.py +++ b/nova/tests/network/test_network_info.py @@ -332,6 +332,34 @@ class NetworkInfoTests(test.TestCase): fake_network_cache_model.new_ip( {'address': '10.10.0.3'})] * 4) + def test_create_async_model(self): + def async_wrapper(): + return model.NetworkInfo( + [fake_network_cache_model.new_vif(), + fake_network_cache_model.new_vif( + {'address': 'bb:bb:bb:bb:bb:bb'})]) + + ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) + self.assertEqual(ninfo.fixed_ips(), + [fake_network_cache_model.new_ip({'address': '10.10.0.2'}), + fake_network_cache_model.new_ip( + {'address': '10.10.0.3'})] * 4) + + def test_create_async_model_exceptions(self): + def async_wrapper(): + raise test.TestingException() + + ninfo = 
model.NetworkInfoAsyncWrapper(async_wrapper) + self.assertRaises(test.TestingException, ninfo.wait) + # 2nd one doesn't raise + self.assertEqual(None, ninfo.wait()) + # Test that do_raise=False works on .wait() + ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) + self.assertEqual(None, ninfo.wait(do_raise=False)) + # Test we also raise calling a method + ninfo = model.NetworkInfoAsyncWrapper(async_wrapper) + self.assertRaises(test.TestingException, ninfo.fixed_ips) + def test_get_floating_ips(self): vif = fake_network_cache_model.new_vif() vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1') diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py index c9157d11c..3dfc7c452 100644 --- a/nova/tests/scheduler/fakes.py +++ b/nova/tests/scheduler/fakes.py @@ -18,7 +18,6 @@ Fakes For Scheduler tests. import mox -from nova.compute import flavors from nova.compute import vm_states from nova import db from nova.scheduler import filter_scheduler @@ -107,14 +106,14 @@ class FakeHostState(host_manager.HostState): class FakeInstance(object): - def __init__(self, context=None, params=None, type_name='m1.tiny'): + def __init__(self, context=None, params=None): """Create a test instance. 
Returns uuid.""" self.context = context - i = self._create_fake_instance(params, type_name=type_name) + i = self._create_fake_instance(params=params) self.uuid = i['uuid'] - def _create_fake_instance(self, params=None, type_name='m1.tiny'): + def _create_fake_instance(self, params=None): """Create a test instance.""" if not params: params = {} @@ -125,8 +124,7 @@ class FakeInstance(object): inst['reservation_id'] = 'r-fakeres' inst['user_id'] = 'fake' inst['project_id'] = 'fake' - type_id = flavors.get_instance_type_by_name(type_name)['id'] - inst['instance_type_id'] = type_id + inst['instance_type_id'] = 2 inst['ami_launch_index'] = 0 inst.update(params) return db.instance_create(self.context, inst) diff --git a/nova/tests/scheduler/test_chance_scheduler.py b/nova/tests/scheduler/test_chance_scheduler.py index a589000cd..8a281073a 100644 --- a/nova/tests/scheduler/test_chance_scheduler.py +++ b/nova/tests/scheduler/test_chance_scheduler.py @@ -194,3 +194,12 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase): self.mox.ReplayAll() hosts = self.driver.select_hosts(ctxt, request_spec, {}) self.assertEquals(['host3', 'host1'], hosts) + + def test_select_hosts_no_valid_host(self): + + def _return_no_host(*args, **kwargs): + return [] + + self.stubs.Set(self.driver, '_schedule', _return_no_host) + self.assertRaises(exception.NoValidHost, + self.driver.select_hosts, self.context, {}, {}) diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index 80680dda8..b82e079a4 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -18,7 +18,6 @@ Tests For Filter Scheduler. 
import mox -from nova.compute import flavors from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states @@ -78,6 +77,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): compute_utils.add_instance_fault_from_exc(fake_context, mox.IsA(conductor_api.LocalAPI), new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) + + self.mox.StubOutWithMock(db, 'compute_node_get_all') + db.compute_node_get_all(mox.IgnoreArg()).AndReturn([]) + self.mox.ReplayAll() sched.schedule_run_instance( fake_context, request_spec, None, None, None, None, {}) @@ -240,6 +243,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = dict(instance_properties=instance_properties) filter_properties = {} + self.mox.StubOutWithMock(db, 'compute_node_get_all') + db.compute_node_get_all(mox.IgnoreArg()).AndReturn([]) + self.mox.ReplayAll() + sched._schedule(self.context, request_spec, filter_properties=filter_properties) @@ -255,6 +262,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): request_spec = dict(instance_properties=instance_properties) filter_properties = {} + self.mox.StubOutWithMock(db, 'compute_node_get_all') + db.compute_node_get_all(mox.IgnoreArg()).AndReturn([]) + self.mox.ReplayAll() + sched._schedule(self.context, request_spec, filter_properties=filter_properties) @@ -272,6 +283,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): retry = dict(num_attempts=1) filter_properties = dict(retry=retry) + self.mox.StubOutWithMock(db, 'compute_node_get_all') + db.compute_node_get_all(mox.IgnoreArg()).AndReturn([]) + self.mox.ReplayAll() + sched._schedule(self.context, request_spec, filter_properties=filter_properties) @@ -335,10 +350,22 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): sched = fakes.FakeFilterScheduler() image = 'image' - instance = db.instance_create(self.context, {}) + instance = 
{'disable_terminate': False, + 'uuid': 'fakeuuid', + 'deleted': 0, 'info_cache': {}, + 'created_at': None, + 'system_metadata': [], 'shutdown_terminate': False, + 'id': 1, 'security_groups': [], 'metadata': []} instance_properties = {'project_id': 'fake', 'os_type': 'Linux'} - instance_type = flavors.get_instance_type_by_name("m1.tiny") + instance_type = { + 'memory_mb': 1024, 'root_gb': 40, 'deleted_at': None, + 'name': u'm1.medium', 'deleted': 0, 'created_at': None, + 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, + 'vcpus': 2, 'extra_specs': {}, 'swap': 0, + 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': u'3', + 'vcpu_weight': None, 'id': 1} + request_spec = {'instance_properties': instance_properties, 'instance_type': instance_type} retry = {'hosts': [], 'num_attempts': 1} @@ -707,3 +734,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): hosts = sched.select_hosts(fake_context, request_spec, {}) self.assertEquals(len(hosts), 10) self.assertEquals(hosts, selected_hosts) + + def test_select_hosts_no_valid_host(self): + + def _return_no_host(*args, **kwargs): + return [] + + self.stubs.Set(self.driver, '_schedule', _return_no_host) + self.assertRaises(exception.NoValidHost, + self.driver.select_hosts, self.context, {}, {}) diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py index 7d7c12339..b09e23f1d 100644 --- a/nova/tests/scheduler/test_host_filters.py +++ b/nova/tests/scheduler/test_host_filters.py @@ -44,7 +44,7 @@ class TestBogusFilter(object): pass -class ExtraSpecsOpsTestCase(test.TestCase): +class ExtraSpecsOpsTestCase(test.NoDBTestCase): def _do_extra_specs_ops_test(self, value, req, matches): assertion = self.assertTrue if matches else self.assertFalse assertion(extra_specs_ops.match(value, req)) @@ -230,8 +230,11 @@ class ExtraSpecsOpsTestCase(test.TestCase): matches=False) -class HostFiltersTestCase(test.TestCase): +class HostFiltersTestCase(test.NoDBTestCase): """Test 
case for host filters.""" + # FIXME(sirp): These tests still require DB access until we can separate + # the testing of the DB API code from the host-filter code. + USES_DB = True def fake_oat_request(self, *args, **kwargs): """Stubs out the response from OAT service.""" diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py index c03c66c4e..8db62bfea 100644 --- a/nova/tests/scheduler/test_host_manager.py +++ b/nova/tests/scheduler/test_host_manager.py @@ -37,7 +37,7 @@ class FakeFilterClass2(filters.BaseHostFilter): pass -class HostManagerTestCase(test.TestCase): +class HostManagerTestCase(test.NoDBTestCase): """Test case for HostManager class.""" def setUp(self): @@ -358,7 +358,7 @@ class HostManagerTestCase(test.TestCase): 8388608) -class HostManagerChangedNodesTestCase(test.TestCase): +class HostManagerChangedNodesTestCase(test.NoDBTestCase): """Test case for HostManager class.""" def setUp(self): @@ -416,7 +416,7 @@ class HostManagerChangedNodesTestCase(test.TestCase): self.assertEqual(len(host_states_map), 0) -class HostStateTestCase(test.TestCase): +class HostStateTestCase(test.NoDBTestCase): """Test case for HostState class.""" # update_from_compute_node() and consume_from_instance() are tested diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py index 44e6a91b8..cecc55f20 100644 --- a/nova/tests/scheduler/test_rpcapi.py +++ b/nova/tests/scheduler/test_rpcapi.py @@ -28,7 +28,7 @@ from nova import test CONF = cfg.CONF -class SchedulerRpcAPITestCase(test.TestCase): +class SchedulerRpcAPITestCase(test.NoDBTestCase): def _test_scheduler_api(self, method, rpc_method, **kwargs): ctxt = context.RequestContext('fake_user', 'fake_project') rpcapi = scheduler_rpcapi.SchedulerAPI() diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index f4f607647..44ddcc7a6 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ 
b/nova/tests/scheduler/test_scheduler.py @@ -48,7 +48,7 @@ from nova.tests.scheduler import fakes from nova import utils -class SchedulerManagerTestCase(test.TestCase): +class SchedulerManagerTestCase(test.NoDBTestCase): """Test case for scheduler manager.""" manager_cls = manager.SchedulerManager @@ -373,7 +373,7 @@ class SchedulerManagerTestCase(test.TestCase): self.context, None, request) -class SchedulerTestCase(test.TestCase): +class SchedulerTestCase(test.NoDBTestCase): """Test case for base scheduler driver class.""" # So we can subclass this test and re-use tests if we need. @@ -435,9 +435,13 @@ class SchedulerTestCase(test.TestCase): self.assertEqual(result, ['host2']) def _live_migration_instance(self): - inst_type = flavors.get_instance_type(1) - # NOTE(danms): we have _got_ to stop doing this! - inst_type['memory_mb'] = 1024 + inst_type = {'memory_mb': 1024, 'root_gb': 40, 'deleted_at': None, + 'name': u'm1.medium', 'deleted': 0, 'created_at': None, + 'ephemeral_gb': 0, 'updated_at': None, 'disabled': False, + 'vcpus': 2, 'extra_specs': {}, 'swap': 0, + 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': u'3', + 'vcpu_weight': None, 'id': 1} + sys_meta = utils.dict_to_metadata( flavors.save_instance_type_info({}, inst_type)) return {'id': 31337, @@ -951,8 +955,12 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase): self.context, {}, fake_request_spec, {}, {}, {}, None) + def test_unimplemented_select_hosts(self): + self.assertRaises(NotImplementedError, + self.driver.select_hosts, self.context, {}, {}) + -class SchedulerDriverModuleTestCase(test.TestCase): +class SchedulerDriverModuleTestCase(test.NoDBTestCase): """Test case for scheduler driver module methods.""" def setUp(self): diff --git a/nova/tests/scheduler/test_scheduler_options.py b/nova/tests/scheduler/test_scheduler_options.py index df54cde5f..6ab245ef9 100644 --- a/nova/tests/scheduler/test_scheduler_options.py +++ b/nova/tests/scheduler/test_scheduler_options.py @@ -50,7 +50,7 @@ class 
FakeSchedulerOptions(scheduler_options.SchedulerOptions): return self._time_now -class SchedulerOptionsTestCase(test.TestCase): +class SchedulerOptionsTestCase(test.NoDBTestCase): def test_get_configuration_first_time_no_flag(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) diff --git a/nova/tests/scheduler/test_weights.py b/nova/tests/scheduler/test_weights.py index aa0f0c15f..7495edacc 100644 --- a/nova/tests/scheduler/test_weights.py +++ b/nova/tests/scheduler/test_weights.py @@ -23,7 +23,7 @@ from nova.tests import matchers from nova.tests.scheduler import fakes -class TestWeighedHost(test.TestCase): +class TestWeighedHost(test.NoDBTestCase): def test_dict_conversion(self): host_state = fakes.FakeHostState('somehost', None, {}) host = weights.WeighedHost(host_state, 'someweight') @@ -38,7 +38,7 @@ class TestWeighedHost(test.TestCase): self.assertIn('RAMWeigher', class_names) -class RamWeigherTestCase(test.TestCase): +class RamWeigherTestCase(test.NoDBTestCase): def setUp(self): super(RamWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py index 8189057cb..716e7636d 100644 --- a/nova/tests/test_block_device.py +++ b/nova/tests/test_block_device.py @@ -20,7 +20,9 @@ Tests for Block Device utility functions. 
""" from nova import block_device +from nova import exception from nova import test +from nova.tests import matchers class BlockDeviceTestCase(test.TestCase): @@ -126,3 +128,124 @@ class BlockDeviceTestCase(test.TestCase): _assert_volume_in_mapping('sdf', True) _assert_volume_in_mapping('sdg', False) _assert_volume_in_mapping('sdh1', False) + + +class TestBlockDeviceDict(test.TestCase): + def setUp(self): + super(TestBlockDeviceDict, self).setUp() + + BDM = block_device.BlockDeviceDict + + self.new_mapping = [ + BDM({'id': 1, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sdb1', + 'source_type': 'blank', + 'destination_type': 'local', + 'delete_on_termination': True, + 'guest_format': 'swap', + 'boot_index': -1}), + BDM({'id': 2, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sdc1', + 'source_type': 'blank', + 'destination_type': 'local', + 'delete_on_termination': True, + 'boot_index': -1}), + BDM({'id': 3, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sda1', + 'source_type': 'volume', + 'destination_type': 'volume', + 'volume_id': 'fake-folume-id-1', + 'connection_info': "{'fake': 'connection_info'}", + 'boot_index': -1}), + BDM({'id': 4, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sda2', + 'source_type': 'snapshot', + 'destination_type': 'volume', + 'connection_info': "{'fake': 'connection_info'}", + 'snapshot_id': 'fake-snapshot-id-1', + 'volume_id': 'fake-volume-id-2', + 'boot_index': -1}), + BDM({'id': 5, 'instance_uuid': 'fake-instance', + 'no_device': True, + 'device_name': '/dev/vdc'}), + ] + + self.legacy_mapping = [ + {'id': 1, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sdb1', + 'delete_on_termination': True, + 'virtual_name': 'swap'}, + {'id': 2, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sdc1', + 'delete_on_termination': True, + 'virtual_name': 'ephemeral0'}, + {'id': 3, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sda1', + 'volume_id': 'fake-folume-id-1', + 
'connection_info': "{'fake': 'connection_info'}"}, + {'id': 4, 'instance_uuid': 'fake-instance', + 'device_name': '/dev/sda2', + 'connection_info': "{'fake': 'connection_info'}", + 'snapshot_id': 'fake-snapshot-id-1', + 'volume_id': 'fake-volume-id-2'}, + {'id': 5, 'instance_uuid': 'fake-instance', + 'no_device': True, + 'device_name': '/dev/vdc'}, + ] + + def test_init(self): + self.stubs.Set(block_device.BlockDeviceDict, '_fields', + set(['field1', 'field2'])) + self.stubs.Set(block_device.BlockDeviceDict, '_db_only_fields', + set(['db_field1', 'db_field2'])) + + # Make sure db fields are not picked up if they are not + # in the original dict + dev_dict = block_device.BlockDeviceDict({'field1': 'foo', + 'field2': 'bar', + 'db_field1': 'baz'}) + self.assertTrue('field1' in dev_dict) + self.assertTrue('field2' in dev_dict) + self.assertTrue('db_field1' in dev_dict) + self.assertFalse('db_field2'in dev_dict) + + # Make sure all expected fields are defaulted + dev_dict = block_device.BlockDeviceDict({'field1': 'foo'}) + self.assertTrue('field1' in dev_dict) + self.assertTrue('field2' in dev_dict) + self.assertTrue(dev_dict['field2'] is None) + self.assertFalse('db_field1' in dev_dict) + self.assertFalse('db_field2'in dev_dict) + + # Unless they are not meant to be + dev_dict = block_device.BlockDeviceDict({'field1': 'foo'}, + do_not_default=set(['field2'])) + self.assertTrue('field1' in dev_dict) + self.assertFalse('field2' in dev_dict) + self.assertFalse('db_field1' in dev_dict) + self.assertFalse('db_field2'in dev_dict) + + # Assert basic validation works + # NOTE (ndipanov): Move to separate test once we have + # more complex validations in place + self.assertRaises(exception.InvalidBDMFormat, + block_device.BlockDeviceDict, + {'field1': 'foo', 'bogus_field': 'lame_val'}) + + def test_from_legacy(self): + for legacy, new in zip(self.legacy_mapping, self.new_mapping): + self.assertThat( + block_device.BlockDeviceDict.from_legacy(legacy), + 
matchers.IsSubDictOf(new)) + + def test_legacy(self): + for legacy, new in zip(self.legacy_mapping, self.new_mapping): + self.assertThat( + legacy, + matchers.IsSubDictOf(new.legacy())) + + def test_legacy_mapping(self): + got_legacy = block_device.legacy_mapping(self.new_mapping) + + for legacy, expected in zip(got_legacy, self.legacy_mapping): + self.assertThat(expected, matchers.IsSubDictOf(legacy)) diff --git a/nova/tests/test_linuxscsi.py b/nova/tests/test_linuxscsi.py index 0775b9d5b..8c098a846 100644 --- a/nova/tests/test_linuxscsi.py +++ b/nova/tests/test_linuxscsi.py @@ -43,11 +43,20 @@ class StorageLinuxSCSITestCase(test.TestCase): out = ("mpath6 (350002ac20398383d) dm-3 3PARdata,VV\n" "size=2.0G features='0' hwhandler='0' wp=rw\n" "`-+- policy='round-robin 0' prio=-1 status=active\n" - " |- 0:0:0:1 sde 8:64 active undef running\n" + " |- 0:0:0:1 sde 8:64 active undef running\n" " `- 2:0:0:1 sdf 8:80 active undef running\n" ) return out, None + def fake_execute2(*cmd, **kwargs): + out = ("350002ac20398383d dm-3 3PARdata,VV\n" + "size=2.0G features='0' hwhandler='0' wp=rw\n" + "`-+- policy='round-robin 0' prio=-1 status=active\n" + " |- 0:0:0:1 sde 8:64 active undef running\n" + " `- 2:0:0:1 sdf 8:80 active undef running\n" + ) + return out, None + self.stubs.Set(utils, 'execute', fake_execute) info = linuxscsi.find_multipath_device('/dev/sde') diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index 86d618930..6b84121c4 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -188,19 +188,24 @@ class MetadataTestCase(test.TestCase): return [{'volume_id': 87654321, 'snapshot_id': None, 'no_device': None, - 'virtual_name': None, + 'source_type': 'volume', + 'destination_type': 'volume', 'delete_on_termination': True, 'device_name': '/dev/sdh'}, {'volume_id': None, 'snapshot_id': None, 'no_device': None, - 'virtual_name': 'swap', + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': 'swap', 
'delete_on_termination': None, 'device_name': '/dev/sdc'}, {'volume_id': None, 'snapshot_id': None, 'no_device': None, - 'virtual_name': 'ephemeral0', + 'source_type': 'blank', + 'destination_type': 'local', + 'guest_format': None, 'delete_on_termination': None, 'device_name': '/dev/sdb'}] @@ -214,6 +219,7 @@ class MetadataTestCase(test.TestCase): 'ebs0': '/dev/sdh'} capi = conductor_api.LocalAPI() + self.assertEqual(base._format_instance_mapping(capi, ctxt, instance_ref0), block_device._DEFAULT_MAPPINGS) self.assertEqual(base._format_instance_mapping(capi, ctxt, diff --git a/nova/tests/utils.py b/nova/tests/utils.py index 3d429aada..5545b789f 100644 --- a/nova/tests/utils.py +++ b/nova/tests/utils.py @@ -58,7 +58,7 @@ def get_test_instance_type(context=None): try: instance_type_ref = nova.db.instance_type_create(context, test_instance_type) - except exception.InstanceTypeExists: + except (exception.InstanceTypeExists, exception.InstanceTypeIdExists): instance_type_ref = nova.db.instance_type_get_by_name(context, 'kinda.big') return instance_type_ref diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py index 50aacba2f..fa708f4f4 100644 --- a/nova/tests/virt/libvirt/test_imagebackend.py +++ b/nova/tests/virt/libvirt/test_imagebackend.py @@ -258,6 +258,7 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase): if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) + os.path.exists(self.PATH).AndReturn(False) imagebackend.disk.get_disk_size(self.TEMPLATE_PATH ).AndReturn(self.SIZE) os.path.exists(self.PATH).AndReturn(False) @@ -279,6 +280,7 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase): if self.OLD_STYLE_INSTANCE_PATH: os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) os.path.exists(self.TEMPLATE_PATH).AndReturn(False) + os.path.exists(self.PATH).AndReturn(False) 
imagebackend.disk.get_disk_size(self.TEMPLATE_PATH ).AndReturn(self.SIZE) self.mox.ReplayAll() @@ -288,6 +290,35 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase): image.create_image, fn, self.TEMPLATE_PATH, 1) self.mox.VerifyAll() + def test_generate_resized_backing_files(self): + fn = self.prepare_mocks() + fn(target=self.TEMPLATE_PATH) + self.mox.StubOutWithMock(os.path, 'exists') + self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size') + self.mox.StubOutWithMock(imagebackend.libvirt_utils, + 'get_disk_backing_file') + if self.OLD_STYLE_INSTANCE_PATH: + os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False) + os.path.exists(self.TEMPLATE_PATH).AndReturn(False) + os.path.exists(self.PATH).AndReturn(True) + + imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\ + .AndReturn(self.QCOW2_BASE) + os.path.exists(self.QCOW2_BASE).AndReturn(False) + imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, + self.QCOW2_BASE) + imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE) + + imagebackend.disk.get_disk_size(self.TEMPLATE_PATH + ).AndReturn(self.SIZE) + os.path.exists(self.PATH).AndReturn(True) + self.mox.ReplayAll() + + image = self.image_class(self.INSTANCE, self.NAME) + image.create_image(fn, self.TEMPLATE_PATH, self.SIZE) + + self.mox.VerifyAll() + class LvmTestCase(_ImageTestCase, test.TestCase): VG = 'FakeVG' diff --git a/nova/tests/virt/libvirt/test_libvirt.py b/nova/tests/virt/libvirt/test_libvirt.py index 83d564c29..2f5f9c857 100644 --- a/nova/tests/virt/libvirt/test_libvirt.py +++ b/nova/tests/virt/libvirt/test_libvirt.py @@ -2708,6 +2708,7 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = self.test_instance instance_ref['image_ref'] = '' instance_ref['root_device_name'] = '/dev/vda' + instance_ref['uuid'] = uuidutils.generate_uuid() instance = db.instance_create(self.context, instance_ref) conn.spawn(self.context, instance, None, [], None, @@ -2719,6 +2720,7 @@ class LibvirtConnTestCase(test.TestCase): instance_ref = 
self.test_instance instance_ref['image_ref'] = 'my_fake_image' instance_ref['root_device_name'] = '/dev/vda' + instance_ref['uuid'] = uuidutils.generate_uuid() instance = db.instance_create(self.context, instance_ref) conn.spawn(self.context, instance, None, [], None, @@ -2728,6 +2730,7 @@ class LibvirtConnTestCase(test.TestCase): # Booted from an image instance_ref['image_ref'] = 'my_fake_image' + instance_ref['uuid'] = uuidutils.generate_uuid() instance = db.instance_create(self.context, instance_ref) conn.spawn(self.context, instance, None, [], None) self.assertTrue(self.cache_called_for_disk) diff --git a/nova/tests/virt/libvirt/test_libvirt_volume.py b/nova/tests/virt/libvirt/test_libvirt_volume.py index c96bc8dda..07a1a7b2f 100644 --- a/nova/tests/virt/libvirt/test_libvirt_volume.py +++ b/nova/tests/virt/libvirt/test_libvirt_volume.py @@ -593,6 +593,7 @@ class LibvirtVolumeTestCase(test.TestCase): libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn) multipath_devname = '/dev/md-1' devices = {"device": multipath_devname, + "id": "1234567890", "devices": [{'device': '/dev/sdb', 'address': '1:0:0:1', 'host': 1, 'channel': 0, diff --git a/nova/utils.py b/nova/utils.py index 2b488a7e7..9067488d5 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -68,9 +68,6 @@ utils_opts = [ cfg.IntOpt('password_length', default=12, help='Length of generated instance admin passwords'), - cfg.BoolOpt('disable_process_locking', - default=False, - help='Whether to disable inter-process locks'), cfg.StrOpt('instance_usage_audit_period', default='month', help='time period to generate instance usages for. 
' diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py index d9a065bf1..7906fdddd 100755 --- a/nova/virt/baremetal/tilera.py +++ b/nova/virt/baremetal/tilera.py @@ -35,22 +35,13 @@ from nova.virt.baremetal import base from nova.virt.baremetal import db from nova.virt.baremetal import utils as bm_utils -tilera_opts = [ - cfg.StrOpt('net_config_template', - default='$pybasedir/nova/virt/baremetal/' - 'net-dhcp.ubuntu.template', - help='Template file for injected network config'), - ] LOG = logging.getLogger(__name__) -baremetal_group = cfg.OptGroup(name='baremetal', - title='Baremetal Options') - CONF = cfg.CONF -CONF.register_group(baremetal_group) -CONF.register_opts(tilera_opts, baremetal_group) CONF.import_opt('use_ipv6', 'nova.netconf') +CONF.import_opt('net_config_template', 'nova.virt.baremetal.pxe', + group='baremetal') CHEETAH = None diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 3d394fb79..844d9dfde 100755 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -250,8 +250,33 @@ class Qcow2(Image): if size: disk.extend(target, size) + # Download the unmodified base image unless we already have a copy. if not os.path.exists(base): prepare_template(target=base, *args, **kwargs) + + legacy_backing_size = None + legacy_base = base + + # Determine whether an existing qcow2 disk uses a legacy backing by + # actually looking at the image itself and parsing the output of the + # backing file it expects to be using. + if os.path.exists(self.path): + backing_path = libvirt_utils.get_disk_backing_file(self.path) + backing_file = os.path.basename(backing_path) + backing_parts = backing_file.rpartition('_') + if backing_file != backing_parts[-1] and \ + backing_parts[-1].isdigit(): + legacy_backing_size = int(backing_parts[-1]) + legacy_base += '_%d' % legacy_backing_size + legacy_backing_size *= 1024 * 1024 * 1024 + + # Create the legacy backing file if necessary. 
+ if legacy_backing_size: + if not os.path.exists(legacy_base): + with utils.remove_path_on_error(legacy_base): + libvirt_utils.copy_image(base, legacy_base) + disk.extend(legacy_base, legacy_backing_size) + # NOTE(cfb): Having a flavor that sets the root size to 0 and having # nova effectively ignore that size and use the size of the # image is considered a feature at this time, not a bug. diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index 34fd9c772..4a11e2704 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -753,6 +753,7 @@ class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver): % {'device': mdev_info['device']}) device_path = mdev_info['device'] connection_info['data']['devices'] = mdev_info['devices'] + connection_info['data']['multipath_id'] = mdev_info['id'] else: # we didn't find a multipath device. # so we assume the kernel only sees 1 device @@ -774,6 +775,15 @@ class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver): self).disconnect_volume(connection_info, mount_device) devices = connection_info['data']['devices'] + # If this is a multipath device, we need to search again + # and make sure we remove all the devices. Some of them + # might not have shown up at attach time. + if 'multipath_id' in connection_info['data']: + multipath_id = connection_info['data']['multipath_id'] + mdev_info = linuxscsi.find_multipath_device(multipath_id) + devices = mdev_info['devices'] + LOG.debug("devices to remove = %s" % devices) + # There may have been more than 1 device mounted # by the kernel for this volume. 
We have to remove # all of them diff --git a/openstack-common.conf b/openstack-common.conf index 297029dc1..05fca3167 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -2,6 +2,7 @@ # The list of modules to copy from oslo-incubator.git module=cliutils +module=config module=context module=db module=db.sqlalchemy diff --git a/tools/conf/analyze_opts.py b/tools/conf/analyze_opts.py index 3088d6390..84294c9d3 100755 --- a/tools/conf/analyze_opts.py +++ b/tools/conf/analyze_opts.py @@ -44,8 +44,8 @@ class PropertyCollecter(iniparser.BaseParser): def collect_properties(cls, lineiter, sample_format=False): def clean_sample(f): for line in f: - if line.startswith("# ") and line != '# nova.conf sample #\n': - line = line[2:] + if line.startswith("#") and not line.startswith("# "): + line = line[1:] yield line pc = cls() if sample_format: diff --git a/tools/conf/generate_sample.sh b/tools/conf/generate_sample.sh index 283d7a654..d0f5c83c9 100755 --- a/tools/conf/generate_sample.sh +++ b/tools/conf/generate_sample.sh @@ -19,10 +19,12 @@ FILES=$(find nova -type f -name "*.py" ! -path "nova/tests/*" -exec \ grep -l "Opt(" {} \; | sort -u) -BINS=$(echo bin/nova-*) +BINS=$(echo bin/nova-* | grep -v nova-rootwrap) + +export EVENTLET_NO_GREENDNS=yes PYTHONPATH=./:${PYTHONPATH} \ - python $(dirname "$0")/extract_opts.py ${FILES} ${BINS} > \ + python $(dirname "$0")/../../nova/openstack/common/config/generator.py ${FILES} ${BINS} > \ etc/nova/nova.conf.sample # Remove compiled files created by imp.import_source() |