100 files changed, 2847 insertions(+), 419 deletions(-)
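The bin/nova-api hunk below wires the new `enabled_ssl_apis` option into the WSGI service launcher. A minimal, self-contained sketch of that selection logic (the option values here are illustrative, not Nova defaults, and this is not the real bin/nova-api script):

```python
# Illustrative sketch of the per-API SSL selection added to bin/nova-api;
# the API names and list contents below are examples only.
enabled_apis = ['ec2', 'osapi_compute', 'metadata']
enabled_ssl_apis = ['osapi_compute']

for api in enabled_apis:
    # Mirrors the diff: should_use_ssl = api in CONF.enabled_ssl_apis
    should_use_ssl = api in enabled_ssl_apis
    print('%s -> use_ssl=%s' % (api, should_use_ssl))
```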
diff --git a/.gitignore b/.gitignore index efb88c781..6028b8a44 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,5 @@ nosetests.xml nova/tests/cover/* nova/vcsversion.py tools/conf/nova.conf* +tools/lintstack.head.py +tools/pylint_exceptions diff --git a/bin/nova-api b/bin/nova-api index 8457ea43d..16cf33cc5 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -44,13 +44,16 @@ from nova import utils CONF = cfg.CONF CONF.import_opt('enabled_apis', 'nova.service') +CONF.import_opt('enabled_ssl_apis', 'nova.service') if __name__ == '__main__': config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() + launcher = service.ProcessLauncher() for api in CONF.enabled_apis: - server = service.WSGIService(api) + should_use_ssl = api in CONF.enabled_ssl_apis + server = service.WSGIService(api, use_ssl=should_use_ssl) launcher.launch_server(server, workers=server.workers or 1) launcher.wait() diff --git a/bin/nova-manage b/bin/nova-manage index 4f3d889ea..90d191eca 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1128,8 +1128,13 @@ def add_command_parsers(subparsers): action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): - action_kwargs.append(kwargs['dest']) - kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] + if kwargs['dest'].startswith('action_kwarg_'): + action_kwargs.append( + kwargs['dest'][len('action_kwarg_'):]) + else: + action_kwargs.append(kwargs['dest']) + kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] + parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy index 8562acc53..477510b99 100755 --- a/bin/nova-novncproxy +++ b/bin/nova-novncproxy @@ -61,6 +61,7 @@ opts = [ CONF = cfg.CONF CONF.register_cli_opts(opts) +CONF.import_opt('debug', 'nova.openstack.common.log') if __name__ == '__main__': diff --git a/bin/nova-spicehtml5proxy b/bin/nova-spicehtml5proxy index b1882bbea..089ff9d71 100755 --- a/bin/nova-spicehtml5proxy +++ b/bin/nova-spicehtml5proxy @@ -61,6 +61,7 @@ opts = [ CONF = cfg.CONF CONF.register_cli_opts(opts) +CONF.import_opt('debug', 'nova.openstack.common.log') if __name__ == '__main__': diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json index bd002c080..604ad6763 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.json +++ b/doc/api_samples/all_extensions/extensions-get-resp.json @@ -89,6 +89,14 @@ "updated": "2012-08-09T00:00:00+00:00" }, { + "alias": "os-baremetal-nodes", + "description": "Admin-only bare-metal node administration.", + "links": [], + "name": "BareMetalNodes", + "namespace": "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2", + "updated": "2013-01-04T00:00:00+00:00" + }, + { "alias": "os-cells", "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ", "links": [], diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml index ebb1c4302..d7f483745 100644 --- a/doc/api_samples/all_extensions/extensions-get-resp.xml +++ b/doc/api_samples/all_extensions/extensions-get-resp.xml @@ -37,6 +37,9 @@ <extension alias="os-availability-zone" updated="2012-08-09T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone"> <description>Add availability_zone to the Create Server v1.1 API.</description> </extension> + <extension 
alias="os-baremetal-nodes" updated="2013-01-04T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2" name="BareMetalNodes"> + <description>Admin-only bare-metal node administration.</description> + </extension> <extension alias="os-cells" updated="2011-09-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells"> <description>Enables cells-related functionality such as adding child cells, listing child cells, getting the capabilities of the local cell, diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.json b/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.json new file mode 100644 index 000000000..2e795e483 --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.json @@ -0,0 +1,5 @@ +{ + "add_interface": { + "address": "aa:aa:aa:aa:aa:aa" + } +}
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.xml b/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.xml new file mode 100644 index 000000000..63ca9c21e --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.xml @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="UTF-8"?> +<add_interface + address="aa:aa:aa:aa:aa:aa" +/>
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.json b/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.json new file mode 100644 index 000000000..d0b9cc3fb --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.json @@ -0,0 +1,8 @@ +{ + "interface": { + "address": "aa:aa:aa:aa:aa:aa", + "datapath_id": null, + "id": 1, + "port_no": null + } +}
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.xml b/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.xml new file mode 100644 index 000000000..1da1dd284 --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.xml @@ -0,0 +1,2 @@ +<?xml version='1.0' encoding='UTF-8'?> +<interface datapath_id="None" id="1" port_no="None" address="aa:aa:aa:aa:aa:aa"/>
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-create-req.json b/doc/api_samples/os-baremetal-nodes/baremetal-node-create-req.json new file mode 100644 index 000000000..d8b9eb452 --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-create-req.json @@ -0,0 +1,14 @@ +{ + "node": { + "service_host": "host", + "cpus": 8, + "memory_mb": 8192, + "local_gb": 128, + "pm_address": "10.1.2.3", + "pm_user": "pm_user", + "pm_password": "pm_pass", + "prov_mac_address": "12:34:56:78:90:ab", + "prov_vlan_id": 1234, + "terminal_port": 8000 + } +}
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-create-req.xml b/doc/api_samples/os-baremetal-nodes/baremetal-node-create-req.xml new file mode 100644 index 000000000..85c863a97 --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-create-req.xml @@ -0,0 +1,12 @@ +<?xml version="1.0" encoding="UTF-8"?> +<node + service_host="host" + cpus="8" + memory_mb="8192" + local_gb="128" + pm_address="10.1.2.3" + pm_user="pm_user" + prov_mac_address="12:34:56:78:90:ab" + prov_vlan_id="1234" + terminal_port="8000" +/>
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-create-resp.json b/doc/api_samples/os-baremetal-nodes/baremetal-node-create-resp.json new file mode 100644 index 000000000..b62a9e663 --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-create-resp.json @@ -0,0 +1,16 @@ +{ + "node": { + "cpus": 8, + "id": 1, + "instance_uuid": null, + "interfaces": [], + "local_gb": 128, + "memory_mb": 8192, + "pm_address": "10.1.2.3", + "pm_user": "pm_user", + "prov_mac_address": "12:34:56:78:90:ab", + "prov_vlan_id": 1234, + "service_host": "host", + "terminal_port": 8000 + } +}
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-create-resp.xml b/doc/api_samples/os-baremetal-nodes/baremetal-node-create-resp.xml new file mode 100644 index 000000000..9b8421f0f --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-create-resp.xml @@ -0,0 +1,15 @@ +<?xml version='1.0' encoding='UTF-8'?> +<node + instance_uuid="None" + pm_address="10.1.2.3" + cpus="8" + prov_vlan_id="1234" + memory_mb="8192" + prov_mac_address="12:34:56:78:90:ab" + service_host="host" + local_gb="128" + id="1" + pm_user="pm_user" + terminal_port="8000"> + <interfaces/> +</node>
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json b/doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json new file mode 100644 index 000000000..d43d580ed --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json @@ -0,0 +1,25 @@ +{ + "nodes": [ + { + "cpus": 8, + "id": 1, + "instance_uuid": null, + "interfaces": [ + { + "address": "aa:aa:aa:aa:aa:aa", + "datapath_id": null, + "id": 1, + "port_no": null + } + ], + "local_gb": 128, + "memory_mb": 8192, + "pm_address": "10.1.2.3", + "pm_user": "pm_user", + "prov_mac_address": "12:34:56:78:90:ab", + "prov_vlan_id": 1234, + "service_host": "host", + "terminal_port": 8000 + } + ] +}
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.xml b/doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.xml new file mode 100644 index 000000000..7cd1b5d8a --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-list-resp.xml @@ -0,0 +1,23 @@ +<?xml version='1.0' encoding='UTF-8'?> +<nodes> + <node + instance_uuid="None" + pm_address="10.1.2.3" + cpus="8" + prov_vlan_id="1234" + memory_mb="8192" + prov_mac_address="12:34:56:78:90:ab" + service_host="host" + local_gb="128" + id="1" + pm_user="pm_user" + terminal_port="8000"> + <interfaces> + <interface + datapath_id="None" + id="1" + port_no="None" + address="aa:aa:aa:aa:aa:aa"/> + </interfaces> + </node> +</nodes>
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.json b/doc/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.json new file mode 100644 index 000000000..0ce85577d --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.json @@ -0,0 +1,5 @@ +{ + "remove_interface": { + "address": "aa:aa:aa:aa:aa:aa" + } +}
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.xml b/doc/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.xml new file mode 100644 index 000000000..6457b059b --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.xml @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="UTF-8"?> +<remove_interface + address="aa:aa:aa:aa:aa:aa" +/>
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-show-resp.json b/doc/api_samples/os-baremetal-nodes/baremetal-node-show-resp.json new file mode 100644 index 000000000..d42365752 --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-show-resp.json @@ -0,0 +1,23 @@ +{ + "node": { + "cpus": 8, + "id": 1, + "instance_uuid": null, + "interfaces": [ + { + "address": "aa:aa:aa:aa:aa:aa", + "datapath_id": null, + "id": 1, + "port_no": null + } + ], + "local_gb": 128, + "memory_mb": 8192, + "pm_address": "10.1.2.3", + "pm_user": "pm_user", + "prov_mac_address": "12:34:56:78:90:ab", + "prov_vlan_id": 1234, + "service_host": "host", + "terminal_port": 8000 + } +}
\ No newline at end of file diff --git a/doc/api_samples/os-baremetal-nodes/baremetal-node-show-resp.xml b/doc/api_samples/os-baremetal-nodes/baremetal-node-show-resp.xml new file mode 100644 index 000000000..6d5f9719f --- /dev/null +++ b/doc/api_samples/os-baremetal-nodes/baremetal-node-show-resp.xml @@ -0,0 +1,21 @@ +<?xml version='1.0' encoding='UTF-8'?> +<node + instance_uuid="None" + pm_address="10.1.2.3" + cpus="8" + prov_vlan_id="1234" + memory_mb="8192" + prov_mac_address="12:34:56:78:90:ab" + service_host="host" + local_gb="128" + id="1" + pm_user="pm_user" + terminal_port="8000"> + <interfaces> + <interface + datapath_id="None" + id="1" + port_no="None" + address="aa:aa:aa:aa:aa:aa"/> + </interfaces> +</node>
\ No newline at end of file diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample index 36a7b0d9c..a5f945618 100644 --- a/etc/nova/nova.conf.sample +++ b/etc/nova/nova.conf.sample @@ -1,47 +1,6 @@ [DEFAULT] # -# Options defined in nova.openstack.common.cfg:CommonConfigOpts -# - -# Print debugging output (boolean value) -#debug=false - -# Print more verbose output (boolean value) -#verbose=false - -# If this option is specified, the logging configuration file -# specified is used and overrides any other logging options -# specified. Please see the Python logging module -# documentation for details on logging configuration files. -# (string value) -#log_config=<None> - -# A logging.Formatter log message format string which may use -# any of the available logging.LogRecord attributes. Default: -# %(default)s (string value) -#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s - -# Format string for %%(asctime)s in log records. Default: -# %(default)s (string value) -#log_date_format=%Y-%m-%d %H:%M:%S - -# (Optional) Name of log file to output to. If not set, -# logging will go to stdout. (string value) -#log_file=<None> - -# (Optional) The directory to keep log files in (will be -# prepended to --log-file) (string value) -#log_dir=<None> - -# Use syslog for logging. (boolean value) -#use_syslog=false - -# syslog facility to receive log lines (string value) -#syslog_log_facility=LOG_USER - - -# # Options defined in nova.availability_zones # @@ -486,6 +445,22 @@ # +# Options defined in nova.api.openstack.compute.contrib.os_tenant_networks +# + +# Enables or disables quotaing of tenant networks (boolean +# value) +#enable_network_quota=false + +# Control for checking for default networks (string value) +#use_quantum_default_nets=False + +# Default tenant id when creating quantum networks (string +# value) +#quantum_default_tenant_id=default + + +# # Options defined in nova.api.openstack.compute.extensions # @@ -1123,10 +1098,6 @@ # Autoassigning floating ip to VM (boolean value) #auto_assign_floating_ip=false -# Network host to use for ip allocation in flat modes (string -# value) -#network_host=nova - # If passed, use fake network devices and addresses (boolean # value) #fake_network=false @@ -1207,6 +1178,10 @@ # (string value) #quantum_auth_strategy=keystone +# Name of Integration Bridge used by Open vSwitch (string +# value) +#quantum_ovs_bridge=br-int + # # Options defined in nova.network.rpcapi @@ -1253,6 +1228,14 @@ # Options defined in nova.openstack.common.log # +# Print debugging output (set logging level to DEBUG instead +# of default WARNING level). (boolean value) +#debug=false + +# Print more verbose output (set logging level to INFO instead +# of default WARNING level). 
(boolean value) +#verbose=false + # Log output to standard error (boolean value) #use_stderr=true @@ -1262,11 +1245,11 @@ # format string to use for log messages with context (string # value) -#logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s] %(instance)s%(message)s +#logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s # format string to use for log messages without context # (string value) -#logging_default_format_string=%(asctime)s %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # data to append to log format when level is DEBUG (string # value) @@ -1274,7 +1257,7 @@ # prefix each line of exception output with this format # (string value) -#logging_exception_prefix=%(asctime)s %(process)d TRACE %(name)s %(instance)s +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s # list of logger=LEVEL pairs (list value) #default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO,keystone=INFO,eventlet.wsgi.server=WARN @@ -1293,6 +1276,36 @@ # it like this (string value) #instance_uuid_format="[instance: %(uuid)s] " +# If this option is specified, the logging configuration file +# specified is used and overrides any other logging options +# specified. Please see the Python logging module +# documentation for details on logging configuration files. +# (string value) +#log_config=<None> + +# A logging.Formatter log message format string which may use +# any of the available logging.LogRecord attributes. Default: +# %(default)s (string value) +#log_format=%(asctime)s %(levelname)8s [%(name)s] %(message)s + +# Format string for %%(asctime)s in log records. Default: +# %(default)s (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If not set, +# logging will go to stdout. (string value) +#log_file=<None> + +# (Optional) The directory to keep log files in (will be +# prepended to --log-file) (string value) +#log_dir=<None> + +# Use syslog for logging. (boolean value) +#use_syslog=false + +# syslog facility to receive log lines (string value) +#syslog_log_facility=LOG_USER + # # Options defined in nova.openstack.common.notifier.api @@ -1724,13 +1737,18 @@ # -# Options defined in nova.virt.hyperv.vmops +# Options defined in nova.virt.hyperv.vif # -# Default vSwitch Name, if none provided first external is -# used (string value) +# External virtual switch Name, if not provided, the first +# external virtual switch is used (string value) #vswitch_name=<None> + +# +# Options defined in nova.virt.hyperv.vmops +# + # Required for live migration among hosts with different CPU # features (boolean value) #limit_cpu_features=false @@ -1985,26 +2003,26 @@ # Options defined in nova.virt.vmwareapi.driver # -# URL for connection to VMWare ESX host.Required if -# compute_driver is vmwareapi.VMWareESXDriver. (string value) +# URL for connection to VMware ESX host.Required if +# compute_driver is vmwareapi.VMwareESXDriver. (string value) #vmwareapi_host_ip=<None> -# Username for connection to VMWare ESX host. Used only if -# compute_driver is vmwareapi.VMWareESXDriver. (string value) +# Username for connection to VMware ESX host. Used only if +# compute_driver is vmwareapi.VMwareESXDriver. 
(string value) #vmwareapi_host_username=<None> -# Password for connection to VMWare ESX host. Used only if -# compute_driver is vmwareapi.VMWareESXDriver. (string value) +# Password for connection to VMware ESX host. Used only if +# compute_driver is vmwareapi.VMwareESXDriver. (string value) #vmwareapi_host_password=<None> # The interval used for polling of remote tasks. Used only if -# compute_driver is vmwareapi.VMWareESXDriver. (floating point +# compute_driver is vmwareapi.VMwareESXDriver. (floating point # value) #vmwareapi_task_poll_interval=5.0 # The number of times we retry on failures, e.g., socket # error, etc. Used only if compute_driver is -# vmwareapi.VMWareESXDriver. (integer value) +# vmwareapi.VMwareESXDriver. (integer value) #vmwareapi_api_retry_count=10 @@ -2278,12 +2296,15 @@ # (string value) #cinder_endpoint_template=<None> +# region name of this node (string value) +#os_region_name=<None> + # Number of cinderclient retries on failed http calls (integer # value) #cinder_http_retries=3 -# Allow to perform insecure SSL (https) requests to cinder -# (boolean value) +# Allow to perform insecure SSL requests to cinder (boolean +# value) #cinder_api_insecure=false @@ -2550,4 +2571,4 @@ #keymap=en-us -# Total option count: 520 +# Total option count: 525 diff --git a/etc/nova/policy.json b/etc/nova/policy.json index fd1f9c2e0..d94ec7192 100644 --- a/etc/nova/policy.json +++ b/etc/nova/policy.json @@ -29,6 +29,7 @@ "compute_extension:admin_actions:migrate": "rule:admin_api", "compute_extension:aggregates": "rule:admin_api", "compute_extension:agents": "rule:admin_api", + "compute_extension:baremetal_nodes": "rule:admin_api", "compute_extension:cells": "rule:admin_api", "compute_extension:certificates": "", "compute_extension:cloudpipe": "rule:admin_api", @@ -83,6 +84,8 @@ "compute_extension:virtual_storage_arrays": "", "compute_extension:volumes": "", "compute_extension:volumetypes": "", + "compute_extension:availability_zone:list": "", + "compute_extension:availability_zone:detail": "rule:admin_api", "volume:create": "", diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters index f344a1b1c..e1113a9e7 100644 --- a/etc/nova/rootwrap.d/compute.filters +++ b/etc/nova/rootwrap.d/compute.filters @@ -99,9 +99,11 @@ pygrub: CommandFilter, /usr/bin/pygrub, root fdisk: CommandFilter, /sbin/fdisk, root # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path +# nova/virt/disk/api.py: e2fsck, -f, -p, image e2fsck: CommandFilter, /sbin/e2fsck, root # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path +# nova/virt/disk/api.py: resize2fs, image resize2fs: CommandFilter, /sbin/resize2fs, root # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... 
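The policy.json hunk above gates the new baremetal_nodes extension and the availability_zone detail action on the admin_api rule, while the availability_zone list action is open to any authenticated user. A toy evaluation (standing in for the real nova.policy engine) of what those entries mean:

```python
# Toy policy check, not nova.policy: an empty rule string means any
# authenticated user may call the action; "rule:admin_api" means admin-only.
policy = {
    "compute_extension:baremetal_nodes": "rule:admin_api",
    "compute_extension:availability_zone:list": "",
    "compute_extension:availability_zone:detail": "rule:admin_api",
}

def is_allowed(rule_name, is_admin):
    rule = policy[rule_name]
    if rule == "":
        return True
    return is_admin if rule == "rule:admin_api" else False

assert is_allowed("compute_extension:availability_zone:list", is_admin=False)
assert not is_allowed("compute_extension:baremetal_nodes", is_admin=False)
assert is_allowed("compute_extension:availability_zone:detail", is_admin=True)
```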
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 31f486b81..48b0f632f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -250,32 +250,10 @@ class CloudController(object): else: return self._describe_availability_zones(context, **kwargs) - def _get_zones(self, context): - """Return available and unavailable zones.""" - enabled_services = db.service_get_all(context, False) - disabled_services = db.service_get_all(context, True) - enabled_services = availability_zones.set_availability_zones(context, - enabled_services) - disabled_services = availability_zones.set_availability_zones(context, - disabled_services) - - available_zones = [] - for zone in [service['availability_zone'] for service - in enabled_services]: - if not zone in available_zones: - available_zones.append(zone) - - not_available_zones = [] - zones = [service['available_zones'] for service in disabled_services - if service['available_zones'] not in available_zones] - for zone in zones: - if zone not in not_available_zones: - not_available_zones.append(zone) - return (available_zones, not_available_zones) - def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() - available_zones, not_available_zones = self._get_zones(ctxt) + available_zones, not_available_zones = \ + availability_zones.get_availability_zones(ctxt) result = [] for zone in available_zones: @@ -291,7 +269,8 @@ class CloudController(object): def _describe_availability_zones_verbose(self, context, **kwargs): ctxt = context.elevated() - available_zones, not_available_zones = self._get_zones(ctxt) + available_zones, not_available_zones = \ + availability_zones.get_availability_zones(ctxt) # Available services enabled_services = db.service_get_all(context, False) diff --git a/nova/api/openstack/compute/contrib/availability_zone.py b/nova/api/openstack/compute/contrib/availability_zone.py index 2955b68eb..6cde5ca64 100644 --- a/nova/api/openstack/compute/contrib/availability_zone.py +++ b/nova/api/openstack/compute/contrib/availability_zone.py @@ -14,14 +14,165 @@ # License for the specific language governing permissions and limitations # under the License +from nova.api.openstack import common from nova.api.openstack import extensions +from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil +from nova import availability_zones +from nova import db +from nova.openstack.common import cfg +from nova.openstack.common import log as logging +from nova import servicegroup + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + +authorize_list = extensions.extension_authorizer('compute', + 'availability_zone:list') +authorize_detail = extensions.extension_authorizer('compute', + 'availability_zone:detail') + + +def make_availability_zone(elem): + elem.set('name', 'zoneName') + + zoneStateElem = xmlutil.SubTemplateElement(elem, 'zoneState', + selector='zoneState') + zoneStateElem.set('available') + + hostsElem = xmlutil.SubTemplateElement(elem, 'hosts', selector='hosts') + hostElem = xmlutil.SubTemplateElement(hostsElem, 'host', + selector=xmlutil.get_items) + hostElem.set('name', 0) + + svcsElem = xmlutil.SubTemplateElement(hostElem, 'services', selector=1) + svcElem = xmlutil.SubTemplateElement(svcsElem, 'service', + selector=xmlutil.get_items) + svcElem.set('name', 0) + + svcStateElem = xmlutil.SubTemplateElement(svcElem, 'serviceState', + selector=1) + svcStateElem.set('available') + svcStateElem.set('active') + svcStateElem.set('updated_at') + + # Attach metadata node + 
elem.append(common.MetadataTemplate()) + + +class AvailabilityZonesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('availabilityZones') + zoneElem = xmlutil.SubTemplateElement(root, 'availabilityZone', + selector='availabilityZoneInfo') + make_availability_zone(zoneElem) + return xmlutil.MasterTemplate(root, 1, nsmap={ + Availability_zone.alias: Availability_zone.namespace}) + + +class AvailabilityZoneController(wsgi.Controller): + """The Availability Zone API controller for the OpenStack API.""" + + def __init__(self): + super(AvailabilityZoneController, self).__init__() + self.servicegroup_api = servicegroup.API() + + def _describe_availability_zones(self, context, **kwargs): + ctxt = context.elevated() + available_zones, not_available_zones = \ + availability_zones.get_availability_zones(ctxt) + + result = [] + for zone in available_zones: + # Hide internal_service_availability_zone + if zone == CONF.internal_service_availability_zone: + continue + result.append({'zoneName': zone, + 'zoneState': {'available': True}, + "hosts": None}) + for zone in not_available_zones: + result.append({'zoneName': zone, + 'zoneState': {'available': False}, + "hosts": None}) + return {'availabilityZoneInfo': result} + + def _describe_availability_zones_verbose(self, context, **kwargs): + ctxt = context.elevated() + available_zones, not_available_zones = \ + availability_zones.get_availability_zones(ctxt) + + # Available services + enabled_services = db.service_get_all(context, False) + enabled_services = availability_zones.set_availability_zones(context, + enabled_services) + zone_hosts = {} + host_services = {} + for service in enabled_services: + zone_hosts.setdefault(service['availability_zone'], []) + if not service['host'] in zone_hosts[service['availability_zone']]: + zone_hosts[service['availability_zone']].append( + service['host']) + + host_services.setdefault(service['availability_zone'] + + service['host'], []) + host_services[service['availability_zone'] + service['host']].\ + append(service) + + result = [] + for zone in available_zones: + hosts = {} + for host in zone_hosts[zone]: + hosts[host] = {} + for service in host_services[zone + host]: + alive = self.servicegroup_api.service_is_up(service) + hosts[host][service['binary']] = {'available': alive, + 'active': True != service['disabled'], + 'updated_at': service['updated_at']} + result.append({'zoneName': zone, + 'zoneState': {'available': True}, + "hosts": hosts}) + + for zone in not_available_zones: + result.append({'zoneName': zone, + 'zoneState': {'available': False}, + "hosts": None}) + return {'availabilityZoneInfo': result} + + @wsgi.serializers(xml=AvailabilityZonesTemplate) + def index(self, req): + """Returns a summary list of availability zone.""" + context = req.environ['nova.context'] + authorize_list(context) + + return self._describe_availability_zones(context) + + @wsgi.serializers(xml=AvailabilityZonesTemplate) + def detail(self, req): + """Returns a detailed list of availability zone.""" + context = req.environ['nova.context'] + authorize_detail(context) + + return self._describe_availability_zones_verbose(context) class Availability_zone(extensions.ExtensionDescriptor): - """Add availability_zone to the Create Server v1.1 API.""" + """1. Add availability_zone to the Create Server v1.1 API. + 2. Add availability zones describing. 
+ """ name = "AvailabilityZone" alias = "os-availability-zone" namespace = ("http://docs.openstack.org/compute/ext/" "availabilityzone/api/v1.1") - updated = "2012-08-09T00:00:00+00:00" + updated = "2012-12-21T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-availability-zone', + AvailabilityZoneController(), + collection_actions={'detail': 'GET'}) + resources.append(res) + + return resources diff --git a/nova/api/openstack/compute/contrib/baremetal_nodes.py b/nova/api/openstack/compute/contrib/baremetal_nodes.py new file mode 100644 index 000000000..38d66d2ae --- /dev/null +++ b/nova/api/openstack/compute/contrib/baremetal_nodes.py @@ -0,0 +1,210 @@ +# Copyright (c) 2013 NTT DOCOMO, INC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The bare-metal admin extension.""" + +import webob + +from nova.api.openstack import extensions +from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil +from nova import exception +from nova.openstack.common import log as logging +from nova.virt.baremetal import db + +LOG = logging.getLogger(__name__) +authorize = extensions.extension_authorizer('compute', 'baremetal_nodes') + +node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address', + 'pm_user', 'prov_mac_address', 'prov_vlan_id', + 'service_host', 'terminal_port', 'instance_uuid', + ] + +interface_fields = ['id', 'address', 'datapath_id', 'port_no'] + + +def _node_dict(node_ref): + d = {} + for f in node_fields: + d[f] = node_ref.get(f) + return d + + +def _interface_dict(interface_ref): + d = {} + for f in interface_fields: + d[f] = interface_ref.get(f) + return d + + +def _make_node_elem(elem): + for f in node_fields: + elem.set(f) + + +def _make_interface_elem(elem): + for f in interface_fields: + elem.set(f) + + +class NodeTemplate(xmlutil.TemplateBuilder): + def construct(self): + node_elem = xmlutil.TemplateElement('node', selector='node') + _make_node_elem(node_elem) + ifs_elem = xmlutil.TemplateElement('interfaces') + if_elem = xmlutil.SubTemplateElement(ifs_elem, 'interface', + selector='interfaces') + _make_interface_elem(if_elem) + node_elem.append(ifs_elem) + return xmlutil.MasterTemplate(node_elem, 1) + + +class NodesTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('nodes') + node_elem = xmlutil.SubTemplateElement(root, 'node', selector='nodes') + _make_node_elem(node_elem) + ifs_elem = xmlutil.TemplateElement('interfaces') + if_elem = xmlutil.SubTemplateElement(ifs_elem, 'interface', + selector='interfaces') + _make_interface_elem(if_elem) + node_elem.append(ifs_elem) + return xmlutil.MasterTemplate(root, 1) + + +class InterfaceTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('interface', selector='interface') + _make_interface_elem(root) + return xmlutil.MasterTemplate(root, 1) + + +class BareMetalNodeController(wsgi.Controller): + """The Bare-Metal Node API controller for the OpenStack API.""" + 
+ @wsgi.serializers(xml=NodesTemplate) + def index(self, req): + context = req.environ['nova.context'] + authorize(context) + nodes_from_db = db.bm_node_get_all(context) + nodes = [] + for node_from_db in nodes_from_db: + try: + ifs = db.bm_interface_get_all_by_bm_node_id( + context, node_from_db['id']) + except exception.InstanceNotFound: + ifs = [] + node = _node_dict(node_from_db) + node['interfaces'] = [_interface_dict(i) for i in ifs] + nodes.append(node) + return {'nodes': nodes} + + @wsgi.serializers(xml=NodeTemplate) + def show(self, req, id): + context = req.environ['nova.context'] + authorize(context) + try: + node = db.bm_node_get(context, id) + except exception.InstanceNotFound: + raise webob.exc.HTTPNotFound + try: + ifs = db.bm_interface_get_all_by_bm_node_id(context, id) + except exception.InstanceNotFound: + ifs = [] + node = _node_dict(node) + node['interfaces'] = [_interface_dict(i) for i in ifs] + return {'node': node} + + @wsgi.serializers(xml=NodeTemplate) + def create(self, req, body): + context = req.environ['nova.context'] + authorize(context) + node = db.bm_node_create(context, body['node']) + node = _node_dict(node) + node['interfaces'] = [] + return {'node': node} + + def delete(self, req, id): + context = req.environ['nova.context'] + authorize(context) + try: + db.bm_node_destroy(context, id) + except exception.InstanceNotFound: + raise webob.exc.HTTPNotFound + return webob.Response(status_int=202) + + def _check_node_exists(self, context, node_id): + try: + db.bm_node_get(context, node_id) + except exception.InstanceNotFound: + raise webob.exc.HTTPNotFound + + @wsgi.serializers(xml=InterfaceTemplate) + @wsgi.action('add_interface') + def _add_interface(self, req, id, body): + context = req.environ['nova.context'] + authorize(context) + self._check_node_exists(context, id) + body = body['add_interface'] + address = body['address'] + datapath_id = body.get('datapath_id') + port_no = body.get('port_no') + if_id = db.bm_interface_create(context, + bm_node_id=id, + address=address, + datapath_id=datapath_id, + port_no=port_no) + if_ref = db.bm_interface_get(context, if_id) + return {'interface': _interface_dict(if_ref)} + + @wsgi.response(202) + @wsgi.action('remove_interface') + def _remove_interface(self, req, id, body): + context = req.environ['nova.context'] + authorize(context) + self._check_node_exists(context, id) + body = body['remove_interface'] + print "body(%s)" % body + if_id = body.get('id') + address = body.get('address') + if not if_id and not address: + raise webob.exc.HTTPBadRequest( + explanation=_("Must specify id or address")) + ifs = db.bm_interface_get_all_by_bm_node_id(context, id) + for i in ifs: + if if_id and if_id != i['id']: + continue + if address and address != i['address']: + continue + db.bm_interface_destroy(context, i['id']) + return webob.Response(status_int=202) + raise webob.exc.HTTPNotFound + + +class Baremetal_nodes(extensions.ExtensionDescriptor): + """Admin-only bare-metal node administration.""" + + name = "BareMetalNodes" + alias = "os-baremetal-nodes" + namespace = "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2" + updated = "2013-01-04T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension('os-baremetal-nodes', + BareMetalNodeController(), + member_actions={"action": "POST", }) + resources.append(res) + return resources diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index f7f186870..93a07ec3f 100644 --- 
a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -906,6 +906,8 @@ class Controller(wsgi.Controller): raise exc.HTTPBadRequest(explanation=unicode(error)) except exception.InvalidMetadataSize as error: raise exc.HTTPRequestEntityTooLarge(explanation=unicode(error)) + except exception.InvalidRequest as error: + raise exc.HTTPBadRequest(explanation=unicode(error)) except exception.ImageNotFound as error: msg = _("Can not find requested image") raise exc.HTTPBadRequest(explanation=msg) diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 733685b14..8b593d742 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -406,6 +406,8 @@ class XMLDictSerializer(DictSerializer): if k in attrs: result.setAttribute(k, str(v)) else: + if k == "deleted": + v = str(bool(v)) node = self._to_xml_node(doc, metadata, k, v) result.appendChild(node) else: diff --git a/nova/availability_zones.py b/nova/availability_zones.py index 62c83f6ed..09cbd98b8 100644 --- a/nova/availability_zones.py +++ b/nova/availability_zones.py @@ -60,3 +60,25 @@ def get_host_availability_zone(context, host): return list(metadata['availability_zone'])[0] else: return CONF.default_availability_zone + + +def get_availability_zones(context): + """Return available and unavailable zones.""" + enabled_services = db.service_get_all(context, False) + disabled_services = db.service_get_all(context, True) + enabled_services = set_availability_zones(context, enabled_services) + disabled_services = set_availability_zones(context, disabled_services) + + available_zones = [] + for zone in [service['availability_zone'] for service + in enabled_services]: + if not zone in available_zones: + available_zones.append(zone) + + not_available_zones = [] + zones = [service['available_zones'] for service in disabled_services + if service['available_zones'] not in available_zones] + for zone in zones: + if zone not in not_available_zones: + not_available_zones.append(zone) + return (available_zones, not_available_zones) diff --git a/nova/compute/api.py b/nova/compute/api.py index f6090b40c..765aeeef5 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -433,7 +433,11 @@ class API(base.Base): max_count = min_count block_device_mapping = block_device_mapping or [] - + if min_count > 1 or max_count > 1: + if any(map(lambda bdm: 'volume_id' in bdm, block_device_mapping)): + msg = _('Cannot attach one or more volumes to multiple' + ' instances') + raise exception.InvalidRequest(msg) if instance_type['disabled']: raise exception.InstanceTypeNotFound( instance_type_id=instance_type['id']) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 652248668..d1cffea7d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -172,7 +172,6 @@ CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api') CONF.import_opt('console_topic', 'nova.console.rpcapi') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') -CONF.import_opt('network_manager', 'nova.service') QUOTAS = quota.QUOTAS @@ -301,8 +300,6 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver = driver.load_compute_driver(self.virtapi, compute_driver) self.network_api = network.API() self.volume_api = volume.API() - self.network_manager = importutils.import_object( - CONF.network_manager, host=kwargs.get('host', None)) self._last_host_check = 0 self._last_bw_usage_poll = 0 self._last_vol_usage_poll = 0 @@ -3385,10 +3382,8 @@ class 
ComputeManager(manager.SchedulerDependentManager): LOG.exception(_("error during stop() in " "sync_power_state."), instance=db_instance) - elif vm_power_state in (power_state.PAUSED, - power_state.SUSPENDED): - LOG.warn(_("Instance is paused or suspended " - "unexpectedly. Calling " + elif vm_power_state == power_state.SUSPENDED: + LOG.warn(_("Instance is suspended unexpectedly. Calling " "the stop API."), instance=db_instance) try: self.compute_api.stop(context, db_instance) @@ -3396,6 +3391,16 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.exception(_("error during stop() in " "sync_power_state."), instance=db_instance) + elif vm_power_state == power_state.PAUSED: + # Note(maoy): a VM may get into the paused state not only + # because the user request via API calls, but also + # due to (temporary) external instrumentations. + # Before the virt layer can reliably report the reason, + # we simply ignore the state discrepancy. In many cases, + # the VM state will go back to running after the external + # instrumentation is done. See bug 1097806 for details. + LOG.warn(_("Instance is paused unexpectedly. Ignore."), + instance=db_instance) elif vm_state == vm_states.STOPPED: if vm_power_state not in (power_state.NOSTATE, power_state.SHUTDOWN, diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 5317487cd..dff2e6b81 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -172,27 +172,43 @@ def model_query(context, model, *args, **kwargs): :param project_only: if present and context is user-type, then restrict query to match the context's project_id. If set to 'allow_none', restriction includes project_id = None. + :param base_model: Where model_query is passed a "model" parameter which is + not a subclass of NovaBase, we should pass an extra base_model + parameter that is a subclass of NovaBase and corresponds to the + model parameter. 
""" session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only', False) + def issubclassof_nova_base(obj): + return isinstance(obj, type) and issubclass(obj, models.NovaBase) + + base_model = model + if not issubclassof_nova_base(base_model): + base_model = kwargs.get('base_model', None) + if not issubclassof_nova_base(base_model): + raise Exception(_("model or base_model parameter should be " + "subclass of NovaBase")) + query = session.query(model, *args) + default_deleted_value = base_model.__mapper__.c.deleted.default.arg if read_deleted == 'no': - query = query.filter_by(deleted=False) + query = query.filter(base_model.deleted == default_deleted_value) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': - query = query.filter_by(deleted=True) + query = query.filter(base_model.deleted != default_deleted_value) else: - raise Exception( - _("Unrecognized read_deleted value '%s'") % read_deleted) + raise Exception(_("Unrecognized read_deleted value '%s'") + % read_deleted) if is_user_context(context) and project_only: if project_only == 'allow_none': - query = query.filter(or_(model.project_id == context.project_id, - model.project_id == None)) + query = query.\ + filter(or_(base_model.project_id == context.project_id, + base_model.project_id == None)) else: query = query.filter_by(project_id=context.project_id) @@ -408,7 +424,8 @@ def service_get_all_compute_sorted(context): label = 'instance_cores' subq = model_query(context, models.Instance.host, func.sum(models.Instance.vcpus).label(label), - session=session, read_deleted="no").\ + base_model=models.Instance, session=session, + read_deleted="no").\ group_by(models.Instance.host).\ subquery() return _service_get_all_topic_subquery(context, @@ -540,7 +557,7 @@ def _update_stats(context, new_stats, compute_id, session, prune_stats=False): # prune un-touched old stats: for stat in statmap.values(): session.add(stat) - stat.update({'deleted': True}) + stat.soft_delete(session=session) # add new and updated stats for stat in stats: @@ -563,10 +580,9 @@ def compute_node_update(context, compute_id, values, prune_stats=False): def compute_node_get_by_host(context, host): """Get all capacity entries for the given host.""" - result = model_query(context, models.ComputeNode).\ + result = model_query(context, models.ComputeNode, read_deleted="no").\ join('service').\ filter(models.Service.host == host).\ - filter_by(deleted=False).\ first() return result @@ -586,6 +602,7 @@ def compute_node_statistics(context): func.sum(models.ComputeNode.current_workload), func.sum(models.ComputeNode.running_vms), func.sum(models.ComputeNode.disk_available_least), + base_model=models.ComputeNode, read_deleted="no").first() # Build a dict of the info--making no assumptions about result @@ -660,7 +677,8 @@ def floating_ip_get(context, id): @require_context def floating_ip_get_pools(context): pools = [] - for result in model_query(context, models.FloatingIp.pool).distinct(): + for result in model_query(context, models.FloatingIp.pool, + base_model=models.FloatingIp).distinct(): pools.append({'name': result[0]}) return pools @@ -1094,30 +1112,31 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time): # host; i.e. the network host or the instance # host matches. Two queries necessary because # join with update doesn't work. 
- host_filter = or_(and_(models.Instance.host == host, - models.Network.multi_host == True), - models.Network.host == host) - result = session.query(models.FixedIp.id).\ - filter(models.FixedIp.deleted == False).\ - filter(models.FixedIp.allocated == False).\ - filter(models.FixedIp.updated_at < time).\ - join((models.Network, - models.Network.id == models.FixedIp.network_id)).\ - join((models.Instance, - models.Instance.uuid == - models.FixedIp.instance_uuid)).\ - filter(host_filter).\ - all() - fixed_ip_ids = [fip[0] for fip in result] - if not fixed_ip_ids: - return 0 - result = model_query(context, models.FixedIp, session=session).\ - filter(models.FixedIp.id.in_(fixed_ip_ids)).\ - update({'instance_uuid': None, - 'leased': False, - 'updated_at': timeutils.utcnow()}, - synchronize_session='fetch') - return result + with session.begin(): + host_filter = or_(and_(models.Instance.host == host, + models.Network.multi_host == True), + models.Network.host == host) + result = model_query(context, models.FixedIp.id, + base_model=models.FixedIp, read_deleted="no", + session=session).\ + filter(models.FixedIp.allocated == False).\ + filter(models.FixedIp.updated_at < time).\ + join((models.Network, + models.Network.id == models.FixedIp.network_id)).\ + join((models.Instance, + models.Instance.uuid == models.FixedIp.instance_uuid)).\ + filter(host_filter).\ + all() + fixed_ip_ids = [fip[0] for fip in result] + if not fixed_ip_ids: + return 0 + result = model_query(context, models.FixedIp, session=session).\ + filter(models.FixedIp.id.in_(fixed_ip_ids)).\ + update({'instance_uuid': None, + 'leased': False, + 'updated_at': timeutils.utcnow()}, + synchronize_session='fetch') + return result @require_context @@ -1468,7 +1487,7 @@ def instance_data_get_for_project(context, project_id, session=None): func.count(models.Instance.id), func.sum(models.Instance.vcpus), func.sum(models.Instance.memory_mb), - read_deleted="no", + base_model=models.Instance, session=session).\ filter_by(project_id=project_id).\ first() @@ -1593,12 +1612,12 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir, # Instances can be soft or hard deleted and the query needs to # include or exclude both if filters.pop('deleted'): - deleted = or_(models.Instance.deleted == True, + deleted = or_(models.Instance.deleted == models.Instance.id, models.Instance.vm_state == vm_states.SOFT_DELETED) query_prefix = query_prefix.filter(deleted) else: query_prefix = query_prefix.\ - filter_by(deleted=False).\ + filter_by(deleted=0).\ filter(models.Instance.vm_state != vm_states.SOFT_DELETED) if not context.is_admin: @@ -2122,19 +2141,21 @@ def network_create_safe(context, values): def network_delete_safe(context, network_id): session = get_session() with session.begin(): - result = session.query(models.FixedIp).\ + result = model_query(context, models.FixedIp, session=session, + read_deleted="no").\ filter_by(network_id=network_id).\ - filter_by(deleted=False).\ filter_by(allocated=True).\ count() if result != 0: raise exception.NetworkInUse(network_id=network_id) network_ref = network_get(context, network_id=network_id, session=session) - session.query(models.FixedIp).\ + + model_query(context, models.FixedIp, session=session, + read_deleted="no").\ filter_by(network_id=network_id).\ - filter_by(deleted=False).\ soft_delete() + session.delete(network_ref) @@ -2213,9 +2234,9 @@ def network_get_associated_fixed_ips(context, network_id, host=None): # without regenerating the whole list vif_and = and_(models.VirtualInterface.id 
== models.FixedIp.virtual_interface_id, - models.VirtualInterface.deleted == False) + models.VirtualInterface.deleted == 0) inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid, - models.Instance.deleted == False) + models.Instance.deleted == 0) session = get_session() query = session.query(models.FixedIp.address, models.FixedIp.instance_uuid, @@ -2225,7 +2246,7 @@ def network_get_associated_fixed_ips(context, network_id, host=None): models.Instance.hostname, models.Instance.updated_at, models.Instance.created_at).\ - filter(models.FixedIp.deleted == False).\ + filter(models.FixedIp.deleted == 0).\ filter(models.FixedIp.network_id == network_id).\ filter(models.FixedIp.allocated == True).\ join((models.VirtualInterface, vif_and)).\ @@ -2326,6 +2347,7 @@ def network_get_all_by_host(context, host): fixed_host_filter = or_(models.FixedIp.host == host, models.Instance.host == host) fixed_ip_query = model_query(context, models.FixedIp.network_id, + base_model=models.FixedIp, session=session).\ outerjoin((models.VirtualInterface, models.VirtualInterface.id == @@ -3138,13 +3160,14 @@ def security_group_in_use(context, group_id): with session.begin(): # Are there any instances that haven't been deleted # that include this group? - inst_assoc = session.query(models.SecurityGroupInstanceAssociation).\ - filter_by(security_group_id=group_id).\ - filter_by(deleted=False).\ - all() + inst_assoc = model_query(context, + models.SecurityGroupInstanceAssociation, + read_deleted="no", session=session).\ + filter_by(security_group_id=group_id).\ + all() for ia in inst_assoc: - num_instances = session.query(models.Instance).\ - filter_by(deleted=False).\ + num_instances = model_query(context, models.Instance, + session=session, read_deleted="no").\ filter_by(uuid=ia.instance_uuid).\ count() if num_instances: @@ -3595,7 +3618,7 @@ def instance_type_get_all(context, inactive=False, filters=None): if filters['is_public'] and context.project_id is not None: the_filter.extend([ models.InstanceTypes.projects.any( - project_id=context.project_id, deleted=False) + project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) @@ -4037,7 +4060,8 @@ def _instance_type_extra_specs_get_query(context, flavor_id, session=None): # Two queries necessary because join with update doesn't work. t = model_query(context, models.InstanceTypes.id, - session=session, read_deleted="no").\ + base_model=models.InstanceTypes, session=session, + read_deleted="no").\ filter(models.InstanceTypes.flavorid == flavor_id).\ subquery() return model_query(context, models.InstanceTypeExtraSpecs, @@ -4091,6 +4115,7 @@ def instance_type_extra_specs_update_or_create(context, flavor_id, specs): session = get_session() with session.begin(): instance_type_id = model_query(context, models.InstanceTypes.id, + base_model=models.InstanceTypes, session=session, read_deleted="no").\ filter(models.InstanceTypes.flavorid == flavor_id).\ first() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py b/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py new file mode 100644 index 000000000..44c3aa41f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/151_change_task_log_column_type.py @@ -0,0 +1,52 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2013 Wenhao Xu <xuwenhao2008@gmail.com>. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, String, Table, DateTime +from sqlalchemy.dialects import postgresql + + +def upgrade(migrate_engine): + """Convert period_beginning and period_ending to DateTime.""" + meta = MetaData() + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect() + + if dialect is postgresql.dialect: + # We need to handle postresql specially. + # Can't use migrate's alter() because it does not support + # explicit casting + for column in ('period_beginning', 'period_ending'): + migrate_engine.execute( + "ALTER TABLE task_log " + "ALTER COLUMN %s TYPE TIMESTAMP WITHOUT TIME ZONE " + "USING %s::TIMESTAMP WITHOUT TIME ZONE" + % (column, column)) + else: + migrations = Table('task_log', meta, autoload=True) + migrations.c.period_beginning.alter(DateTime) + migrations.c.period_ending.alter(DateTime) + + +def downgrade(migrate_engine): + """Convert columns back to String(255).""" + meta = MetaData() + meta.bind = migrate_engine + + # don't need to handle postgresql here. + migrations = Table('task_log', meta, autoload=True) + migrations.c.period_beginning.alter(String(255)) + migrations.c.period_ending.alter(String(255)) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py new file mode 100644 index 000000000..d4bd991f7 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py @@ -0,0 +1,226 @@ +from sqlalchemy import CheckConstraint +from sqlalchemy.engine import reflection +from sqlalchemy.ext.compiler import compiles +from sqlalchemy import MetaData, Table, Column, Index +from sqlalchemy import select +from sqlalchemy.sql.expression import UpdateBase +from sqlalchemy.sql import literal_column +from sqlalchemy import String, Integer, Boolean +from sqlalchemy.types import NullType, BigInteger + + +all_tables = ['services', 'compute_nodes', 'compute_node_stats', + 'certificates', 'instances', 'instance_info_caches', + 'instance_types', 'volumes', 'quotas', 'quota_classes', + 'quota_usages', 'reservations', 'snapshots', + 'block_device_mapping', 'iscsi_targets', + 'security_group_instance_association', 'security_groups', + 'security_group_rules', 'provider_fw_rules', 'key_pairs', + 'migrations', 'networks', 'virtual_interfaces', 'fixed_ips', + 'floating_ips', 'console_pools', 'consoles', + 'instance_metadata', 'instance_system_metadata', + 'instance_type_projects', 'instance_type_extra_specs', + 'aggregate_hosts', 'aggregate_metadata', 'aggregates', + 'agent_builds', 's3_images', + 'instance_faults', + 'bw_usage_cache', 'volume_id_mappings', 'snapshot_id_mappings', + 'instance_id_mappings', 'volume_usage_cache', 'task_log', + 'instance_actions', 'instance_actions_events'] +# note(boris-42): We can't do migration for the dns_domains table because it +# doesn't have `id` column. 
+ + +class InsertFromSelect(UpdateBase): + def __init__(self, table, select): + self.table = table + self.select = select + + +@compiles(InsertFromSelect) +def visit_insert_from_select(element, compiler, **kw): + return "INSERT INTO %s %s" % ( + compiler.process(element.table, asfrom=True), + compiler.process(element.select)) + + +def get_default_deleted_value(table): + if isinstance(table.c.id.type, Integer): + return 0 + # NOTE(boris-42): There is only one other type that is used as id (String) + return "" + + +def upgrade_enterprise_dbs(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + for table_name in all_tables: + table = Table(table_name, meta, autoload=True) + + new_deleted = Column('new_deleted', table.c.id.type, + default=get_default_deleted_value(table)) + new_deleted.create(table, populate_default=True) + + table.update().\ + where(table.c.deleted == True).\ + values(new_deleted=table.c.id).\ + execute() + table.c.deleted.drop() + table.c.new_deleted.alter(name="deleted") + + +def upgrade(migrate_engine): + if migrate_engine.name != "sqlite": + return upgrade_enterprise_dbs(migrate_engine) + + # NOTE(boris-42): sqlaclhemy-migrate can't drop column with check + # constraints in sqlite DB and our `deleted` column has + # 2 check constraints. So there is only one way to remove + # these constraints: + # 1) Create new table with the same columns, constraints + # and indexes. (except deleted column). + # 2) Copy all data from old to new table. + # 3) Drop old table. + # 4) Rename new table to old table name. + insp = reflection.Inspector.from_engine(migrate_engine) + meta = MetaData() + meta.bind = migrate_engine + + for table_name in all_tables: + table = Table(table_name, meta, autoload=True) + default_deleted_value = get_default_deleted_value(table) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + # NOTE(boris-42): BigInteger is not supported by sqlite, so + # after copy it will have NullType, other + # types that are used in Nova are supported by + # sqlite. + if isinstance(column.type, NullType): + column_copy = Column(column.name, BigInteger(), default=0) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', table.c.id.type, + default=default_deleted_value) + columns.append(column_copy) + + def is_deleted_column_constraint(constraint): + # NOTE(boris-42): There is no other way to check is CheckConstraint + # associated with deleted column. + if not isinstance(constraint, CheckConstraint): + return False + sqltext = str(constraint.sqltext) + return (sqltext.endswith("deleted in (0, 1)") or + sqltext.endswith("deleted IN (:deleted_1, :deleted_2)")) + + constraints = [] + for constraint in table.constraints: + if not is_deleted_column_constraint(constraint): + constraints.append(constraint.copy()) + + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], + *column_names, + unique=index["unique"])) + + ins = InsertFromSelect(new_table, table.select()) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + new_table.update().\ + where(new_table.c.deleted == True).\ + values(deleted=new_table.c.id).\ + execute() + + # NOTE(boris-42): Fix value of deleted column: False -> "" or 0. 
+ new_table.update().\ + where(new_table.c.deleted == False).\ + values(deleted=default_deleted_value).\ + execute() + + +def downgrade_enterprise_dbs(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + + for table_name in all_tables: + table = Table(table_name, meta, autoload=True) + + old_deleted = Column('old_deleted', Boolean, default=False) + old_deleted.create(table, populate_default=False) + + table.update().\ + where(table.c.deleted == table.c.id).\ + values(old_deleted=True).\ + execute() + + table.c.deleted.drop() + table.c.old_deleted.alter(name="deleted") + + +def downgrade(migrate_engine): + if migrate_engine.name != "sqlite": + return downgrade_enterprise_dbs(migrate_engine) + + insp = reflection.Inspector.from_engine(migrate_engine) + meta = MetaData() + meta.bind = migrate_engine + + for table_name in all_tables: + table = Table(table_name, meta, autoload=True) + + columns = [] + for column in table.columns: + column_copy = None + if column.name != "deleted": + if isinstance(column.type, NullType): + column_copy = Column(column.name, BigInteger(), default=0) + else: + column_copy = column.copy() + else: + column_copy = Column('deleted', Boolean, default=0) + columns.append(column_copy) + + constraints = [constraint.copy() for constraint in table.constraints] + + new_table = Table(table_name + "__tmp__", meta, + *(columns + constraints)) + new_table.create() + + indexes = [] + for index in insp.get_indexes(table_name): + column_names = [new_table.c[c] for c in index['column_names']] + indexes.append(Index(index["name"], + *column_names, + unique=index["unique"])) + + c_select = [] + for c in table.c: + if c.name != "deleted": + c_select.append(c) + else: + c_select.append(table.c.deleted == table.c.id) + + ins = InsertFromSelect(new_table, select(c_select)) + migrate_engine.execute(ins) + + table.drop() + [index.create(migrate_engine) for index in indexes] + + new_table.rename(table_name) + new_table.update().\ + where(new_table.c.deleted == new_table.c.id).\ + values(deleted=True).\ + execute() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 5050cb77e..14c651020 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -42,7 +42,7 @@ class NovaBase(object): created_at = Column(DateTime, default=timeutils.utcnow) updated_at = Column(DateTime, onupdate=timeutils.utcnow) deleted_at = Column(DateTime) - deleted = Column(Boolean, default=False) + deleted = Column(Integer, default=0) metadata = None def save(self, session=None): @@ -63,7 +63,7 @@ class NovaBase(object): def soft_delete(self, session=None): """Mark this object as deleted.""" - self.deleted = True + self.deleted = self.id self.deleted_at = timeutils.utcnow() self.save(session=session) @@ -129,7 +129,7 @@ class ComputeNode(BASE, NovaBase): foreign_keys=service_id, primaryjoin='and_(' 'ComputeNode.service_id == Service.id,' - 'ComputeNode.deleted == False)') + 'ComputeNode.deleted == 0)') vcpus = Column(Integer) memory_mb = Column(Integer) @@ -173,7 +173,7 @@ class ComputeNodeStat(BASE, NovaBase): compute_node_id = Column(Integer, ForeignKey('compute_nodes.id')) primary_join = ('and_(ComputeNodeStat.compute_node_id == ' - 'ComputeNode.id, ComputeNodeStat.deleted == False)') + 'ComputeNode.id, ComputeNodeStat.deleted == 0)') stats = relationship("ComputeNode", backref="stats", primaryjoin=primary_join) @@ -358,6 +358,7 @@ class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a VM.""" __tablename__ = 'volumes' id = 
Column(String(36), primary_key=True) + deleted = Column(String(36), default="") @property def name(self): @@ -465,13 +466,14 @@ class Reservation(BASE, NovaBase): "QuotaUsage", foreign_keys=usage_id, primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' - 'QuotaUsage.deleted == False)') + 'QuotaUsage.deleted == 0)') class Snapshot(BASE, NovaBase): """Represents a block storage device that can be attached to a VM.""" __tablename__ = 'snapshots' id = Column(String(36), primary_key=True) + deleted = Column(String(36), default="") @property def name(self): @@ -507,7 +509,7 @@ class BlockDeviceMapping(BASE, NovaBase): 'instance_uuid==' 'Instance.uuid,' 'BlockDeviceMapping.deleted==' - 'False)') + '0)') device_name = Column(String(255), nullable=False) # default=False for compatibility of the existing code. @@ -542,7 +544,7 @@ class IscsiTarget(BASE, NovaBase): backref=backref('iscsi_target', uselist=False), foreign_keys=volume_id, primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' - 'IscsiTarget.deleted==False)') + 'IscsiTarget.deleted==0)') class SecurityGroupInstanceAssociation(BASE, NovaBase): @@ -567,14 +569,14 @@ class SecurityGroup(BASE, NovaBase): primaryjoin='and_(' 'SecurityGroup.id == ' 'SecurityGroupInstanceAssociation.security_group_id,' - 'SecurityGroupInstanceAssociation.deleted == False,' - 'SecurityGroup.deleted == False)', + 'SecurityGroupInstanceAssociation.deleted == 0,' + 'SecurityGroup.deleted == 0)', secondaryjoin='and_(' 'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,' # (anthony) the condition below shouldn't be necessary now that the # association is being marked as deleted. However, removing this # may cause existing deployments to choke, so I'm leaving it - 'Instance.deleted == False)', + 'Instance.deleted == 0)', backref='security_groups') @@ -588,7 +590,7 @@ class SecurityGroupIngressRule(BASE, NovaBase): foreign_keys=parent_group_id, primaryjoin='and_(' 'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,' - 'SecurityGroupIngressRule.deleted == False)') + 'SecurityGroupIngressRule.deleted == 0)') protocol = Column(String(5)) # "tcp", "udp", or "icmp" from_port = Column(Integer) @@ -602,7 +604,7 @@ class SecurityGroupIngressRule(BASE, NovaBase): foreign_keys=group_id, primaryjoin='and_(' 'SecurityGroupIngressRule.group_id == SecurityGroup.id,' - 'SecurityGroupIngressRule.deleted == False)') + 'SecurityGroupIngressRule.deleted == 0)') class ProviderFirewallRule(BASE, NovaBase): @@ -651,7 +653,7 @@ class Migration(BASE, NovaBase): instance = relationship("Instance", foreign_keys=instance_uuid, primaryjoin='and_(Migration.instance_uuid == ' 'Instance.uuid, Instance.deleted == ' - 'False)') + '0)') class Network(BASE, NovaBase): @@ -735,6 +737,7 @@ class FloatingIp(BASE, NovaBase): class DNSDomain(BASE, NovaBase): """Represents a DNS domain with availability zone or project info.""" __tablename__ = 'dns_domains' + deleted = Column(Boolean, default=False) domain = Column(String(512), primary_key=True) scope = Column(String(255)) availability_zone = Column(String(255)) @@ -779,7 +782,7 @@ class InstanceMetadata(BASE, NovaBase): primaryjoin='and_(' 'InstanceMetadata.instance_uuid == ' 'Instance.uuid,' - 'InstanceMetadata.deleted == False)') + 'InstanceMetadata.deleted == 0)') class InstanceSystemMetadata(BASE, NovaBase): @@ -793,7 +796,7 @@ class InstanceSystemMetadata(BASE, NovaBase): nullable=False) primary_join = ('and_(InstanceSystemMetadata.instance_uuid == ' - 'Instance.uuid, InstanceSystemMetadata.deleted == False)') + 
'Instance.uuid, InstanceSystemMetadata.deleted == 0)') instance = relationship(Instance, backref="system_metadata", foreign_keys=instance_uuid, primaryjoin=primary_join) @@ -811,7 +814,7 @@ class InstanceTypeProjects(BASE, NovaBase): foreign_keys=instance_type_id, primaryjoin='and_(' 'InstanceTypeProjects.instance_type_id == InstanceTypes.id,' - 'InstanceTypeProjects.deleted == False)') + 'InstanceTypeProjects.deleted == 0)') class InstanceTypeExtraSpecs(BASE, NovaBase): @@ -826,7 +829,7 @@ class InstanceTypeExtraSpecs(BASE, NovaBase): foreign_keys=instance_type_id, primaryjoin='and_(' 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,' - 'InstanceTypeExtraSpecs.deleted == False)') + 'InstanceTypeExtraSpecs.deleted == 0)') class Cell(BASE, NovaBase): @@ -880,24 +883,24 @@ class Aggregate(BASE, NovaBase): secondary="aggregate_hosts", primaryjoin='and_(' 'Aggregate.id == AggregateHost.aggregate_id,' - 'AggregateHost.deleted == False,' - 'Aggregate.deleted == False)', + 'AggregateHost.deleted == 0,' + 'Aggregate.deleted == 0)', secondaryjoin='and_(' 'AggregateHost.aggregate_id == Aggregate.id, ' - 'AggregateHost.deleted == False,' - 'Aggregate.deleted == False)', + 'AggregateHost.deleted == 0,' + 'Aggregate.deleted == 0)', backref='aggregates') _metadata = relationship(AggregateMetadata, secondary="aggregate_metadata", primaryjoin='and_(' 'Aggregate.id == AggregateMetadata.aggregate_id,' - 'AggregateMetadata.deleted == False,' - 'Aggregate.deleted == False)', + 'AggregateMetadata.deleted == 0,' + 'Aggregate.deleted == 0)', secondaryjoin='and_(' 'AggregateMetadata.aggregate_id == Aggregate.id, ' - 'AggregateMetadata.deleted == False,' - 'Aggregate.deleted == False)', + 'AggregateMetadata.deleted == 0,' + 'Aggregate.deleted == 0)', backref='aggregates') def _extra_keys(self): @@ -1038,8 +1041,8 @@ class TaskLog(BASE, NovaBase): task_name = Column(String(255), nullable=False) state = Column(String(255), nullable=False) host = Column(String(255)) - period_beginning = Column(String(255), default=timeutils.utcnow) - period_ending = Column(String(255), default=timeutils.utcnow) + period_beginning = Column(DateTime, default=timeutils.utcnow) + period_ending = Column(DateTime, default=timeutils.utcnow) message = Column(String(255), nullable=False) task_items = Column(Integer(), default=0) errors = Column(Integer(), default=0) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 9c896ae97..cfabc7085 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -536,7 +536,7 @@ def create_engine(sql_connection): class Query(sqlalchemy.orm.query.Query): """Subclass of sqlalchemy.query with soft_delete() method.""" def soft_delete(self, synchronize_session='evaluate'): - return self.update({'deleted': True, + return self.update({'deleted': literal_column('id'), 'updated_at': literal_column('updated_at'), 'deleted_at': timeutils.utcnow()}, synchronize_session=synchronize_session) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4fefb2db4..a9b44e94a 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -1124,6 +1124,40 @@ def _create_veth_pair(dev1_name, dev2_name): run_as_root=True) +def create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id): + utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port', + bridge, dev, + '--', 'set', 'Interface', dev, + 'external-ids:iface-id=%s' % iface_id, + 'external-ids:iface-status=active', + 'external-ids:attached-mac=%s' % mac, + 'external-ids:vm-uuid=%s' % 
instance_id, + run_as_root=True) + + +def delete_ovs_vif_port(bridge, dev): + utils.execute('ovs-vsctl', 'del-port', bridge, dev, + run_as_root=True) + utils.execute('ip', 'link', 'delete', dev, + run_as_root=True) + + +def create_tap_dev(dev, mac_address=None): + if not device_exists(dev): + try: + # First, try with 'ip' + utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap', + run_as_root=True, check_exit_code=[0, 2, 254]) + except exception.ProcessExecutionError: + # Second option: tunctl + utils.execute('tunctl', '-b', '-t', dev, run_as_root=True) + if mac_address: + utils.execute('ip', 'link', 'set', dev, 'address', mac_address, + run_as_root=True, check_exit_code=[0, 2, 254]) + utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True, + check_exit_code=[0, 2, 254]) + + # Similar to compute virt layers, the Linux network node # code uses a flexible driver model to support different ways # of creating ethernet interfaces and attaching them to the network. @@ -1535,7 +1569,7 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): iptables_manager.ipv4['filter'].add_rule('FORWARD', '--out-interface %s -j ACCEPT' % bridge) - QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev, mac_address) + create_tap_dev(dev, mac_address) if not device_exists(bridge): LOG.debug(_("Starting bridge %s "), bridge) @@ -1570,22 +1604,6 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): LOG.debug(_("Unplugged gateway interface '%s'"), dev) return dev - @classmethod - def create_tap_dev(_self, dev, mac_address=None): - if not device_exists(dev): - try: - # First, try with 'ip' - utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap', - run_as_root=True, check_exit_code=[0, 2, 254]) - except exception.ProcessExecutionError: - # Second option: tunctl - utils.execute('tunctl', '-b', '-t', dev, run_as_root=True) - if mac_address: - utils.execute('ip', 'link', 'set', dev, 'address', mac_address, - run_as_root=True, check_exit_code=[0, 2, 254]) - utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True, - check_exit_code=[0, 2, 254]) - def get_dev(self, network): dev = self.GATEWAY_INTERFACE_PREFIX + str(network['uuid'][0:11]) return dev diff --git a/nova/network/manager.py b/nova/network/manager.py index 9ca7680a5..96f17a855 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -929,7 +929,7 @@ class NetworkManager(manager.SchedulerDependentManager): The one at a time part is to flatten the layout to help scale """ - RPC_API_VERSION = '1.6' + RPC_API_VERSION = '1.7' # If True, this manager requires VIF to create a bridge. 
SHOULD_CREATE_BRIDGE = False diff --git a/nova/network/model.py b/nova/network/model.py index 0771156c1..9accb883e 100644 --- a/nova/network/model.py +++ b/nova/network/model.py @@ -207,7 +207,7 @@ class Network(Model): class VIF(Model): """Represents a Virtual Interface in Nova.""" def __init__(self, id=None, address=None, network=None, type=None, - devname=None, **kwargs): + devname=None, ovs_interfaceid=None, **kwargs): super(VIF, self).__init__() self['id'] = id @@ -216,6 +216,8 @@ class VIF(Model): self['type'] = type self['devname'] = devname + self['ovs_interfaceid'] = ovs_interfaceid + self._set_meta(kwargs) def __eq__(self, other): @@ -381,6 +383,7 @@ class NetworkInfo(list): 'vif_type': vif['type'], 'vif_devname': vif.get('devname'), 'vif_uuid': vif['id'], + 'ovs_interfaceid': vif.get('ovs_interfaceid'), 'rxtx_cap': vif.get_meta('rxtx_cap', 0), 'dns': [get_ip(ip) for ip in subnet_v4['dns']], 'ips': [fixed_ip_dict(ip, subnet) diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py index 29e5e2f06..704ed5cef 100644 --- a/nova/network/quantumv2/api.py +++ b/nova/network/quantumv2/api.py @@ -661,11 +661,13 @@ class API(base.Base): if fixed_ip.is_in_subnet(subnet)] bridge = None + ovs_interfaceid = None vif_type = port.get('binding:vif_type') # TODO(berrange) Quantum should pass the bridge name # in another binding metadata field if vif_type == network_model.VIF_TYPE_OVS: bridge = CONF.quantum_ovs_bridge + ovs_interfaceid = port['id'] elif vif_type == network_model.VIF_TYPE_BRIDGE: bridge = "brq" + port['network_id'] @@ -688,6 +690,7 @@ class API(base.Base): address=port['mac_address'], network=network, type=port.get('binding:vif_type'), + ovs_interfaceid=ovs_interfaceid, devname=devname)) return nw_info diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py index ad1f2a8a6..534a610c0 100644 --- a/nova/openstack/common/cfg.py +++ b/nova/openstack/common/cfg.py @@ -217,7 +217,7 @@ log files:: ... ] -This module also contains a global instance of the CommonConfigOpts class +This module also contains a global instance of the ConfigOpts class in order to support a common usage pattern in OpenStack:: from nova.openstack.common import cfg @@ -236,10 +236,11 @@ in order to support a common usage pattern in OpenStack:: Positional command line arguments are supported via a 'positional' Opt constructor argument:: - >>> CONF.register_cli_opt(MultiStrOpt('bar', positional=True)) + >>> conf = ConfigOpts() + >>> conf.register_cli_opt(MultiStrOpt('bar', positional=True)) True - >>> CONF(['a', 'b']) - >>> CONF.bar + >>> conf(['a', 'b']) + >>> conf.bar ['a', 'b'] It is also possible to use argparse "sub-parsers" to parse additional @@ -249,10 +250,11 @@ command line arguments using the SubCommandOpt class: ... list_action = subparsers.add_parser('list') ... list_action.add_argument('id') ... 
- >>> CONF.register_cli_opt(SubCommandOpt('action', handler=add_parsers)) + >>> conf = ConfigOpts() + >>> conf.register_cli_opt(SubCommandOpt('action', handler=add_parsers)) True - >>> CONF(['list', '10']) - >>> CONF.action.name, CONF.action.id + >>> conf(args=['list', '10']) + >>> conf.action.name, conf.action.id ('list', '10') """ @@ -1726,62 +1728,4 @@ class ConfigOpts(collections.Mapping): return value -class CommonConfigOpts(ConfigOpts): - - DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" - DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - - common_cli_opts = [ - BoolOpt('debug', - short='d', - default=False, - help='Print debugging output'), - BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output'), - ] - - logging_cli_opts = [ - StrOpt('log-config', - metavar='PATH', - help='If this option is specified, the logging configuration ' - 'file specified is used and overrides any other logging ' - 'options specified. Please see the Python logging module ' - 'documentation for details on logging configuration ' - 'files.'), - StrOpt('log-format', - default=DEFAULT_LOG_FORMAT, - metavar='FORMAT', - help='A logging.Formatter log message format string which may ' - 'use any of the available logging.LogRecord attributes. ' - 'Default: %(default)s'), - StrOpt('log-date-format', - default=DEFAULT_LOG_DATE_FORMAT, - metavar='DATE_FORMAT', - help='Format string for %%(asctime)s in log records. ' - 'Default: %(default)s'), - StrOpt('log-file', - metavar='PATH', - deprecated_name='logfile', - help='(Optional) Name of log file to output to. ' - 'If not set, logging will go to stdout.'), - StrOpt('log-dir', - deprecated_name='logdir', - help='(Optional) The directory to keep log files in ' - '(will be prepended to --log-file)'), - BoolOpt('use-syslog', - default=False, - help='Use syslog for logging.'), - StrOpt('syslog-log-facility', - default='LOG_USER', - help='syslog facility to receive log lines') - ] - - def __init__(self): - super(CommonConfigOpts, self).__init__() - self.register_cli_opts(self.common_cli_opts) - self.register_cli_opts(self.logging_cli_opts) - - -CONF = CommonConfigOpts() +CONF = ConfigOpts() diff --git a/nova/openstack/common/iniparser.py b/nova/openstack/common/iniparser.py index 241284449..9bf399f0c 100644 --- a/nova/openstack/common/iniparser.py +++ b/nova/openstack/common/iniparser.py @@ -54,7 +54,7 @@ class BaseParser(object): value = value.strip() if ((value and value[0] == value[-1]) and - (value[0] == "\"" or value[0] == "'")): + (value[0] == "\"" or value[0] == "'")): value = value[1:-1] return key.strip(), [value] diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py index 5c6dbcf14..32513bb32 100644 --- a/nova/openstack/common/log.py +++ b/nova/openstack/common/log.py @@ -47,6 +47,67 @@ from nova.openstack.common import local from nova.openstack.common import notifier +_DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s" +_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" + +common_cli_opts = [ + cfg.BoolOpt('debug', + short='d', + default=False, + help='Print debugging output (set logging level to ' + 'DEBUG instead of default WARNING level).'), + cfg.BoolOpt('verbose', + short='v', + default=False, + help='Print more verbose output (set logging level to ' + 'INFO instead of default WARNING level).'), +] + +logging_cli_opts = [ + cfg.StrOpt('log-config', + metavar='PATH', + help='If this option is specified, the logging configuration ' + 'file specified is used and overrides any 
other logging ' + 'options specified. Please see the Python logging module ' + 'documentation for details on logging configuration ' + 'files.'), + cfg.StrOpt('log-format', + default=_DEFAULT_LOG_FORMAT, + metavar='FORMAT', + help='A logging.Formatter log message format string which may ' + 'use any of the available logging.LogRecord attributes. ' + 'Default: %(default)s'), + cfg.StrOpt('log-date-format', + default=_DEFAULT_LOG_DATE_FORMAT, + metavar='DATE_FORMAT', + help='Format string for %%(asctime)s in log records. ' + 'Default: %(default)s'), + cfg.StrOpt('log-file', + metavar='PATH', + deprecated_name='logfile', + help='(Optional) Name of log file to output to. ' + 'If not set, logging will go to stdout.'), + cfg.StrOpt('log-dir', + deprecated_name='logdir', + help='(Optional) The directory to keep log files in ' + '(will be prepended to --log-file)'), + cfg.BoolOpt('use-syslog', + default=False, + help='Use syslog for logging.'), + cfg.StrOpt('syslog-log-facility', + default='LOG_USER', + help='syslog facility to receive log lines') +] + +generic_log_opts = [ + cfg.BoolOpt('use_stderr', + default=True, + help='Log output to standard error'), + cfg.StrOpt('logfile_mode', + default='0644', + help='Default file mode used when creating log files'), +] + log_opts = [ cfg.StrOpt('logging_context_format_string', default='%(asctime)s.%(msecs)03d %(levelname)s %(name)s ' @@ -94,24 +155,9 @@ log_opts = [ 'format it like this'), ] - -generic_log_opts = [ - cfg.StrOpt('logdir', - default=None, - help='Log output to a per-service log file in named directory'), - cfg.StrOpt('logfile', - default=None, - help='Log output to a named file'), - cfg.BoolOpt('use_stderr', - default=True, - help='Log output to standard error'), - cfg.StrOpt('logfile_mode', - default='0644', - help='Default file mode used when creating log files'), -] - - CONF = cfg.CONF +CONF.register_cli_opts(common_cli_opts) +CONF.register_cli_opts(logging_cli_opts) CONF.register_opts(generic_log_opts) CONF.register_opts(log_opts) @@ -149,8 +195,8 @@ def _get_binary_name(): def _get_log_file_path(binary=None): - logfile = CONF.log_file or CONF.logfile - logdir = CONF.log_dir or CONF.logdir + logfile = CONF.log_file + logdir = CONF.log_dir if logfile and not logdir: return logfile diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 09de10388..16714a5ff 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -23,7 +23,6 @@ Scheduler base class that all Schedulers should inherit from import sys -from nova.compute import api as compute_api from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils @@ -115,7 +114,6 @@ class Scheduler(object): def __init__(self): self.host_manager = importutils.import_object( CONF.scheduler_host_manager) - self.compute_api = compute_api.API() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.servicegroup_api = servicegroup.API() diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py index 07a3f578a..9384e1495 100644 --- a/nova/scheduler/filter_scheduler.py +++ b/nova/scheduler/filter_scheduler.py @@ -47,15 +47,15 @@ class FilterScheduler(driver.Scheduler): Returns a list of the instances created. 
""" - instance_uuids = request_spec.get('instance_uuids') - num_instances = len(instance_uuids) - LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % - locals()) - payload = dict(request_spec=request_spec) notifier.notify(context, notifier.publisher_id("scheduler"), 'scheduler.run_instance.start', notifier.INFO, payload) + instance_uuids = request_spec.pop('instance_uuids') + num_instances = len(instance_uuids) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + weighed_hosts = self._schedule(context, request_spec, filter_properties, instance_uuids) @@ -124,6 +124,8 @@ class FilterScheduler(driver.Scheduler): filter_properties, requested_networks, injected_files, admin_password, is_first_time, instance_uuid=None): """Create the requested resource in this Zone.""" + # NOTE(vish): add our current instance back into the request spec + request_spec['instance_uuids'] = [instance_uuid] payload = dict(request_spec=request_spec, weighted_host=weighed_host.to_dict(), instance_id=instance_uuid) diff --git a/nova/service.py b/nova/service.py index df8cf020f..87857f93d 100644 --- a/nova/service.py +++ b/nova/service.py @@ -61,6 +61,9 @@ service_opts = [ cfg.ListOpt('enabled_apis', default=['ec2', 'osapi_compute', 'metadata'], help='a list of APIs to enable by default'), + cfg.ListOpt('enabled_ssl_apis', + default=[], + help='a list of APIs with enabled SSL'), cfg.StrOpt('ec2_listen', default="0.0.0.0", help='IP address for EC2 API to listen'), @@ -399,6 +402,14 @@ class Service(object): self.binary = binary self.topic = topic self.manager_class_name = manager + # NOTE(russellb) We want to make sure to create the servicegroup API + # instance early, before creating other things such as the manager, + # that will also create a servicegroup API instance. Internally, the + # servicegroup only allocates a single instance of the driver API and + # we want to make sure that our value of db_allowed is there when it + # gets created. For that to happen, this has to be the first instance + # of the servicegroup API. + self.servicegroup_api = servicegroup.API(db_allowed=db_allowed) manager_class = importutils.import_class(self.manager_class_name) self.manager = manager_class(host=self.host, *args, **kwargs) self.report_interval = report_interval @@ -408,10 +419,8 @@ class Service(object): self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] self.backdoor_port = None - self.db_allowed = db_allowed self.conductor_api = conductor.API(use_local=db_allowed) self.conductor_api.wait_until_ready(context.get_admin_context()) - self.servicegroup_api = servicegroup.API(db_allowed=db_allowed) def start(self): verstr = version.version_string_with_package() @@ -565,7 +574,7 @@ class Service(object): class WSGIService(object): """Provides ability to launch API from a 'paste' configuration.""" - def __init__(self, name, loader=None): + def __init__(self, name, loader=None, use_ssl=False): """Initialize, but do not start the WSGI server. :param name: The name of the WSGI server given to the loader. 
@@ -580,10 +589,12 @@ class WSGIService(object): self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") self.port = getattr(CONF, '%s_listen_port' % name, 0) self.workers = getattr(CONF, '%s_workers' % name, None) + self.use_ssl = use_ssl self.server = wsgi.Server(name, self.app, host=self.host, - port=self.port) + port=self.port, + use_ssl=self.use_ssl) # Pull back actual port used self.port = self.server.port self.backdoor_port = None diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py new file mode 100644 index 000000000..8abe7f388 --- /dev/null +++ b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py @@ -0,0 +1,244 @@ +# Copyright 2012 IBM +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from datetime import datetime +from lxml import etree +import webob + +from nova.api.openstack.compute.contrib import availability_zone +from nova import availability_zones +from nova import context +from nova import db +from nova.openstack.common import jsonutils +from nova import servicegroup +from nova import test +from nova.tests.api.openstack import fakes + + +def fake_service_get_all(context, disabled=None): + def __fake_service(binary, availability_zone, + created_at, updated_at, host, disabled): + return {'binary': binary, + 'availability_zone': availability_zone, + 'available_zones': availability_zone, + 'created_at': created_at, + 'updated_at': updated_at, + 'host': host, + 'disabled': disabled} + + if disabled: + return [__fake_service("nova-compute", "zone-2", + datetime(2012, 11, 14, 9, 53, 25, 0), + datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", True), + __fake_service("nova-scheduler", "internal", + datetime(2012, 11, 14, 9, 57, 3, 0), + datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", True), + __fake_service("nova-network", "internal", + datetime(2012, 11, 16, 7, 25, 46, 0), + datetime(2012, 12, 26, 14, 45, 24, 0), + "fake_host-2", True)] + else: + return [__fake_service("nova-compute", "zone-1", + datetime(2012, 11, 14, 9, 53, 25, 0), + datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", False), + __fake_service("nova-sched", "internal", + datetime(2012, 11, 14, 9, 57, 03, 0), + datetime(2012, 12, 26, 14, 45, 25, 0), + "fake_host-1", False), + __fake_service("nova-network", "internal", + datetime(2012, 11, 16, 7, 25, 46, 0), + datetime(2012, 12, 26, 14, 45, 24, 0), + "fake_host-2", False)] + + +def fake_service_is_up(self, service): + return service['binary'] != u"nova-network" + + +def fake_set_availability_zones(context, services): + return services + + +class AvailabilityZoneApiTest(test.TestCase): + def setUp(self): + super(AvailabilityZoneApiTest, self).setUp() + self.stubs.Set(db, 'service_get_all', fake_service_get_all) + self.stubs.Set(availability_zones, 'set_availability_zones', + fake_set_availability_zones) + self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up) + + def 
test_availability_zone_index(self): + req = webob.Request.blank('/v2/fake/os-availability-zone') + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + resp_dict = jsonutils.loads(resp.body) + + self.assertTrue('availabilityZoneInfo' in resp_dict) + zones = resp_dict['availabilityZoneInfo'] + self.assertEqual(len(zones), 2) + self.assertEqual(zones[0]['zoneName'], u'zone-1') + self.assertTrue(zones[0]['zoneState']['available']) + self.assertIsNone(zones[0]['hosts']) + self.assertEqual(zones[1]['zoneName'], u'zone-2') + self.assertFalse(zones[1]['zoneState']['available']) + self.assertIsNone(zones[1]['hosts']) + + def test_availability_zone_detail(self): + def _formatZone(zone_dict): + result = [] + + # Zone tree view item + result.append({'zoneName': zone_dict['zoneName'], + 'zoneState': u'available' + if zone_dict['zoneState']['available'] else + u'not available'}) + + if zone_dict['hosts'] is not None: + for (host, services) in zone_dict['hosts'].items(): + # Host tree view item + result.append({'zoneName': u'|- %s' % host, + 'zoneState': u''}) + for (svc, state) in services.items(): + # Service tree view item + result.append({'zoneName': u'| |- %s' % svc, + 'zoneState': u'%s %s %s' % ( + 'enabled' if state['active'] else + 'disabled', + ':-)' if state['available'] else + 'XXX', + jsonutils.to_primitive( + state['updated_at']))}) + return result + + def _assertZone(zone, name, status): + self.assertEqual(zone['zoneName'], name) + self.assertEqual(zone['zoneState'], status) + + availabilityZone = availability_zone.AvailabilityZoneController() + + req = webob.Request.blank('/v2/fake/os-availability-zone/detail') + req.method = 'GET' + req.environ['nova.context'] = context.get_admin_context() + resp_dict = availabilityZone.detail(req) + + self.assertTrue('availabilityZoneInfo' in resp_dict) + zones = resp_dict['availabilityZoneInfo'] + self.assertEqual(len(zones), 3) + + ''' availabilityZoneInfo field content in response body: + [{'zoneName': 'zone-1', + 'zoneState': {'available': True}, + 'hosts': {'fake_host-1': { + 'nova-compute': {'active': True, 'available': True, + 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}}}, + {'zoneName': 'internal', + 'zoneState': {'available': True}, + 'hosts': {'fake_host-1': { + 'nova-sched': {'active': True, 'available': True, + 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}, + 'fake_host-2': { + 'nova-network': {'active': True, 'available': False, + 'updated_at': datetime(2012, 12, 26, 14, 45, 24)}}}}, + {'zoneName': 'zone-2', + 'zoneState': {'available': False}, + 'hosts': None}] + ''' + + l0 = [u'zone-1', u'available'] + l1 = [u'|- fake_host-1', u''] + l2 = [u'| |- nova-compute', u'enabled :-) 2012-12-26T14:45:25.000000'] + l3 = [u'internal', u'available'] + l4 = [u'|- fake_host-1', u''] + l5 = [u'| |- nova-sched', u'enabled :-) 2012-12-26T14:45:25.000000'] + l6 = [u'|- fake_host-2', u''] + l7 = [u'| |- nova-network', u'enabled XXX 2012-12-26T14:45:24.000000'] + l8 = [u'zone-2', u'not available'] + + z0 = _formatZone(zones[0]) + z1 = _formatZone(zones[1]) + z2 = _formatZone(zones[2]) + + self.assertEqual(len(z0), 3) + self.assertEqual(len(z1), 5) + self.assertEqual(len(z2), 1) + + _assertZone(z0[0], l0[0], l0[1]) + _assertZone(z0[1], l1[0], l1[1]) + _assertZone(z0[2], l2[0], l2[1]) + _assertZone(z1[0], l3[0], l3[1]) + _assertZone(z1[1], l4[0], l4[1]) + _assertZone(z1[2], l5[0], l5[1]) + _assertZone(z1[3], l6[0], l6[1]) + _assertZone(z1[4], l7[0], l7[1]) + _assertZone(z2[0], l8[0], l8[1]) + + +class 
AvailabilityZoneSerializerTest(test.TestCase): + def test_availability_zone_index_detail_serializer(self): + def _verify_zone(zone_dict, tree): + self.assertEqual(tree.tag, 'availabilityZone') + self.assertEqual(zone_dict['zoneName'], tree.get('name')) + self.assertEqual(str(zone_dict['zoneState']['available']), + tree[0].get('available')) + + for _idx, host_child in enumerate(tree[1]): + self.assertTrue(host_child.get('name') in zone_dict['hosts']) + svcs = zone_dict['hosts'][host_child.get('name')] + for _idx, svc_child in enumerate(host_child[0]): + self.assertTrue(svc_child.get('name') in svcs) + svc = svcs[svc_child.get('name')] + self.assertEqual(len(svc_child), 1) + + self.assertEqual(str(svc['available']), + svc_child[0].get('available')) + self.assertEqual(str(svc['active']), + svc_child[0].get('active')) + self.assertEqual(str(svc['updated_at']), + svc_child[0].get('updated_at')) + + serializer = availability_zone.AvailabilityZonesTemplate() + raw_availability_zones = \ + [{'zoneName': 'zone-1', + 'zoneState': {'available': True}, + 'hosts': {'fake_host-1': { + 'nova-compute': {'active': True, 'available': True, + 'updated_at': + datetime(2012, 12, 26, 14, 45, 25)}}}}, + {'zoneName': 'internal', + 'zoneState': {'available': True}, + 'hosts': {'fake_host-1': { + 'nova-sched': {'active': True, 'available': True, + 'updated_at': + datetime(2012, 12, 26, 14, 45, 25)}}, + 'fake_host-2': { + 'nova-network': {'active': True, + 'available': False, + 'updated_at': + datetime(2012, 12, 26, 14, 45, 24)}}}}, + {'zoneName': 'zone-2', + 'zoneState': {'available': False}, + 'hosts': None}] + + text = serializer.serialize( + dict(availabilityZoneInfo=raw_availability_zones)) + tree = etree.fromstring(text) + + self.assertEqual('availabilityZones', tree.tag) + self.assertEqual(len(raw_availability_zones), len(tree)) + for idx, child in enumerate(tree): + _verify_zone(raw_availability_zones[idx], child) diff --git a/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py b/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py new file mode 100644 index 000000000..381d452a7 --- /dev/null +++ b/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py @@ -0,0 +1,197 @@ +# Copyright (c) 2013 NTT DOCOMO, INC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
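Aside (not part of the patch): the bare-metal node tests that follow drive the controller entirely through mox record/replay stubs (`StubOutWithMock`, `AndReturn`/`AndRaise`, `ReplayAll`); they never call `VerifyAll` themselves, which suggests the Nova base test class handles verification on teardown. For readers who have not used that (now largely retired) library, a minimal standalone sketch of the flow; `FakeDB` is a hypothetical stand-in for the stubbed `nova.virt.baremetal.db` module:

# Hypothetical illustration only -- FakeDB stands in for the stubbed module.
import mox


class FakeDB(object):
    def bm_node_get(self, context, node_id):
        raise AssertionError('the real implementation must not run in tests')


fake_db = FakeDB()
m = mox.Mox()

# 1) Record: swap the attribute for a mock, then state the expected call and
#    the canned result it should produce.
m.StubOutWithMock(fake_db, 'bm_node_get')
fake_db.bm_node_get('ctx', 1).AndReturn({'id': 1})

# 2) Replay: from here on, every call is checked against the recording.
m.ReplayAll()
assert fake_db.bm_node_get('ctx', 1) == {'id': 1}

# 3) Verify and restore: VerifyAll fails if an expected call never happened.
m.VerifyAll()
m.UnsetStubs()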
+ +from webob import exc + +from nova.api.openstack.compute.contrib import baremetal_nodes +from nova import context +from nova import exception +from nova import test +from nova.virt.baremetal import db + + +class FakeRequest(object): + + def __init__(self, context): + self.environ = {"nova.context": context} + + +class BareMetalNodesTest(test.TestCase): + + def setUp(self): + super(BareMetalNodesTest, self).setUp() + + self.context = context.get_admin_context() + self.controller = baremetal_nodes.BareMetalNodeController() + self.request = FakeRequest(self.context) + + def test_create(self): + node = { + 'service_host': "host", + 'cpus': 8, + 'memory_mb': 8192, + 'local_gb': 128, + 'pm_address': "10.1.2.3", + 'pm_user': "pm_user", + 'pm_password': "pm_pass", + 'prov_mac_address': "12:34:56:78:90:ab", + 'prov_vlan_id': 1234, + 'terminal_port': 8000, + 'interfaces': [], + } + response = node.copy() + response['id'] = 100 + del response['pm_password'] + response['instance_uuid'] = None + self.mox.StubOutWithMock(db, 'bm_node_create') + db.bm_node_create(self.context, node).AndReturn(response) + self.mox.ReplayAll() + res_dict = self.controller.create(self.request, {'node': node}) + self.assertEqual({'node': response}, res_dict) + + def test_delete(self): + self.mox.StubOutWithMock(db, 'bm_node_destroy') + db.bm_node_destroy(self.context, 1) + self.mox.ReplayAll() + self.controller.delete(self.request, 1) + + def test_index(self): + nodes = [{'id': 1}, + {'id': 2}, + ] + interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'}, + {'id': 2, 'address': '22:22:22:22:22:22'}, + ] + self.mox.StubOutWithMock(db, 'bm_node_get_all') + self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id') + db.bm_node_get_all(self.context).AndReturn(nodes) + db.bm_interface_get_all_by_bm_node_id(self.context, 1).\ + AndRaise(exception.InstanceNotFound(instance_id=1)) + db.bm_interface_get_all_by_bm_node_id(self.context, 2).\ + AndReturn(interfaces) + self.mox.ReplayAll() + res_dict = self.controller.index(self.request) + self.assertEqual(2, len(res_dict['nodes'])) + self.assertEqual([], res_dict['nodes'][0]['interfaces']) + self.assertEqual(2, len(res_dict['nodes'][1]['interfaces'])) + + def test_show(self): + node_id = 1 + node = {'id': node_id} + interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'}, + {'id': 2, 'address': '22:22:22:22:22:22'}, + ] + self.mox.StubOutWithMock(db, 'bm_node_get') + self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id') + db.bm_node_get(self.context, node_id).AndReturn(node) + db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\ + AndReturn(interfaces) + self.mox.ReplayAll() + res_dict = self.controller.show(self.request, node_id) + self.assertEqual(node_id, res_dict['node']['id']) + self.assertEqual(2, len(res_dict['node']['interfaces'])) + + def test_add_interface(self): + node_id = 1 + address = '11:22:33:44:55:66' + body = {'add_interface': {'address': address}} + self.mox.StubOutWithMock(db, 'bm_node_get') + self.mox.StubOutWithMock(db, 'bm_interface_create') + self.mox.StubOutWithMock(db, 'bm_interface_get') + db.bm_node_get(self.context, node_id) + db.bm_interface_create(self.context, + bm_node_id=node_id, + address=address, + datapath_id=None, + port_no=None).\ + AndReturn(12345) + db.bm_interface_get(self.context, 12345).\ + AndReturn({'id': 12345, 'address': address}) + self.mox.ReplayAll() + res_dict = self.controller._add_interface(self.request, node_id, body) + self.assertEqual(12345, res_dict['interface']['id']) + 
self.assertEqual(address, res_dict['interface']['address']) + + def test_remove_interface(self): + node_id = 1 + interfaces = [{'id': 1}, + {'id': 2}, + {'id': 3}, + ] + body = {'remove_interface': {'id': 2}} + self.mox.StubOutWithMock(db, 'bm_node_get') + self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id') + self.mox.StubOutWithMock(db, 'bm_interface_destroy') + db.bm_node_get(self.context, node_id) + db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\ + AndReturn(interfaces) + db.bm_interface_destroy(self.context, 2) + self.mox.ReplayAll() + self.controller._remove_interface(self.request, node_id, body) + + def test_remove_interface_by_address(self): + node_id = 1 + interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'}, + {'id': 2, 'address': '22:22:22:22:22:22'}, + {'id': 3, 'address': '33:33:33:33:33:33'}, + ] + self.mox.StubOutWithMock(db, 'bm_node_get') + self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id') + self.mox.StubOutWithMock(db, 'bm_interface_destroy') + db.bm_node_get(self.context, node_id) + db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\ + AndReturn(interfaces) + db.bm_interface_destroy(self.context, 2) + self.mox.ReplayAll() + body = {'remove_interface': {'address': '22:22:22:22:22:22'}} + self.controller._remove_interface(self.request, node_id, body) + + def test_remove_interface_no_id_no_address(self): + node_id = 1 + self.mox.StubOutWithMock(db, 'bm_node_get') + db.bm_node_get(self.context, node_id) + self.mox.ReplayAll() + body = {'remove_interface': {}} + self.assertRaises(exc.HTTPBadRequest, + self.controller._remove_interface, + self.request, + node_id, + body) + + def test_add_interface_node_not_found(self): + node_id = 1 + self.mox.StubOutWithMock(db, 'bm_node_get') + db.bm_node_get(self.context, node_id).\ + AndRaise(exception.InstanceNotFound(instance_id=node_id)) + self.mox.ReplayAll() + body = {'add_interface': {'address': '11:11:11:11:11:11'}} + self.assertRaises(exc.HTTPNotFound, + self.controller._add_interface, + self.request, + node_id, + body) + + def test_remove_interface_node_not_found(self): + node_id = 1 + self.mox.StubOutWithMock(db, 'bm_node_get') + db.bm_node_get(self.context, node_id).\ + AndRaise(exception.InstanceNotFound(instance_id=node_id)) + self.mox.ReplayAll() + body = {'remove_interface': {'address': '11:11:11:11:11:11'}} + self.assertRaises(exc.HTTPNotFound, + self.controller._remove_interface, + self.request, + node_id, + body) diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index af769a6ca..5456c23af 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -2008,6 +2008,55 @@ class ServersControllerCreateTest(test.TestCase): self.assertNotEqual(reservation_id, None) self.assertTrue(len(reservation_id) > 1) + def test_create_multiple_instances_with_multiple_volume_bdm(self): + """ + Test that a BadRequest is raised if multiple instances + are requested with a list of block device mappings for volumes. 
+ """ + self.ext_mgr.extensions = {'os-multiple-create': 'fake'} + min_count = 2 + bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'}, + {'device_name': 'foo2', 'volume_id': 'vol-yyyy'} + ] + params = { + 'block_device_mapping': bdm, + 'min_count': min_count + } + old_create = compute_api.API.create + + def create(*args, **kwargs): + self.assertEqual(kwargs['min_count'], 2) + self.assertEqual(len(kwargs['block_device_mapping']), 2) + return old_create(*args, **kwargs) + + self.stubs.Set(compute_api.API, 'create', create) + self.assertRaises(webob.exc.HTTPBadRequest, + self._test_create_extra, params, no_image=True) + + def test_create_multiple_instances_with_single_volume_bdm(self): + """ + Test that a BadRequest is raised if multiple instances + are requested to boot from a single volume. + """ + self.ext_mgr.extensions = {'os-multiple-create': 'fake'} + min_count = 2 + bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'}] + params = { + 'block_device_mapping': bdm, + 'min_count': min_count + } + old_create = compute_api.API.create + + def create(*args, **kwargs): + self.assertEqual(kwargs['min_count'], 2) + self.assertEqual(kwargs['block_device_mapping']['volume_id'], + 'vol-xxxx') + return old_create(*args, **kwargs) + + self.stubs.Set(compute_api.API, 'create', create) + self.assertRaises(webob.exc.HTTPBadRequest, + self._test_create_extra, params, no_image=True) + def test_create_instance_image_ref_is_bookmark(self): image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' image_href = 'http://localhost/fake/images/%s' % image_uuid diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py index acefa856c..d8780744d 100644 --- a/nova/tests/fake_policy.py +++ b/nova/tests/fake_policy.py @@ -105,6 +105,7 @@ policy_data = """ "compute_extension:admin_actions:migrate": "", "compute_extension:aggregates": "", "compute_extension:agents": "", + "compute_extension:baremetal_nodes": "", "compute_extension:cells": "", "compute_extension:certificates": "", "compute_extension:cloudpipe": "", @@ -158,6 +159,8 @@ policy_data = """ "compute_extension:volumes": "", "compute_extension:volumetypes": "", "compute_extension:zones": "", + "compute_extension:availability_zone:list": "", + "compute_extension:availability_zone:detail": "is_admin:True", "volume:create": "", diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl index fe0613646..be2fabec4 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl @@ -89,6 +89,14 @@ "updated": "%(timestamp)s" }, { + "alias": "os-baremetal-nodes", + "description": "%(text)s", + "links": [], + "name": "BareMetalNodes", + "namespace": "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2", + "updated": "%(timestamp)s" + }, + { "alias": "os-cells", "description": "%(text)s", "links": [], diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl index 2051d891a..ae2e9ff9e 100644 --- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl @@ -33,6 +33,9 @@ <extension alias="os-agents" name="Agents" namespace="http://docs.openstack.org/compute/ext/agents/api/v2" updated="%(timestamp)s"> 
<description>%(text)s</description> </extension> + <extension alias="os-baremetal-nodes" name="BareMetalNodes" namespace="http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2" updated="%(timestamp)s"> + <description>%(text)s</description> + </extension> <extension alias="os-cells" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells"> <description>%(text)s</description> </extension> diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl new file mode 100644 index 000000000..6d44692e1 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl @@ -0,0 +1,48 @@ +{ + "availabilityZoneInfo": [ + { + "zoneName": "zone-1", + "zoneState": { + "available": true + }, + "hosts": { + "fake_host-1": { + "nova-compute": { + "active": true, + "available": true, + "updated_at": "2012-12-26T14:45:25.000000" + } + } + } + }, + { + "zoneName": "internal", + "zoneState": { + "available": true + }, + "hosts": { + "fake_host-1": { + "nova-sched": { + "active": true, + "available": true, + "updated_at": "2012-12-26T14:45:25.000000" + } + }, + "fake_host-2": { + "nova-network": { + "active": true, + "available": false, + "updated_at": "2012-12-26T14:45:24.000000" + } + } + } + }, + { + "zoneName": "zone-2", + "zoneState": { + "available": false + }, + "hosts": null + } + ] +}
\ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl new file mode 100644 index 000000000..856a64957 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl @@ -0,0 +1,44 @@ +<?xml version='1.0' encoding='UTF-8'?> +<availabilityZones + xmlns:os-availability-zone="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1"> + <availabilityZone name="zone-1"> + <zoneState available="True" /> + <hosts> + <host name="fake_host-1"> + <services> + <service name="nova-compute"> + <serviceState available="True" active="True" + updated_at="2012-12-26 14:45:25" /> + </service> + </services> + </host> + </hosts> + <metadata /> + </availabilityZone> + <availabilityZone name="internal"> + <zoneState available="True" /> + <hosts> + <host name="fake_host-1"> + <services> + <service name="nova-sched"> + <serviceState available="True" active="True" + updated_at="2012-12-26 14:45:25" /> + </service> + </services> + </host> + <host name="fake_host-2"> + <services> + <service name="nova-network"> + <serviceState available="False" active="True" + updated_at="2012-12-26 14:45:24" /> + </service> + </services> + </host> + </hosts> + <metadata /> + </availabilityZone> + <availabilityZone name="zone-2"> + <zoneState available="False" /> + <metadata /> + </availabilityZone> +</availabilityZones>
\ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl new file mode 100644 index 000000000..381708aaf --- /dev/null +++ b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl @@ -0,0 +1,18 @@ +{ + "availabilityZoneInfo": [ + { + "zoneName": "zone-1", + "zoneState": { + "available": true + }, + "hosts": null + }, + { + "zoneName": "zone-2", + "zoneState": { + "available": false + }, + "hosts": null + } + ] +}
\ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl new file mode 100644 index 000000000..1eff177de --- /dev/null +++ b/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl @@ -0,0 +1,12 @@ +<?xml version='1.0' encoding='UTF-8'?> +<availabilityZones + xmlns:os-availability-zone="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1"> + <availabilityZone name="zone-1"> + <zoneState available="True" /> + <metadata /> + </availabilityZone> + <availabilityZone name="zone-2"> + <zoneState available="False" /> + <metadata /> + </availabilityZone> +</availabilityZones>
\ No newline at end of file diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.json.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.json.tpl new file mode 100644 index 000000000..fbc9e5b8d --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.json.tpl @@ -0,0 +1,5 @@ +{ + "add_interface": { + "address": "%(address)s" + } +} diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.xml.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.xml.tpl new file mode 100644 index 000000000..abbbe895b --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-req.xml.tpl @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="UTF-8"?> +<add_interface + address="%(address)s" +/> diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.json.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.json.tpl new file mode 100644 index 000000000..268b41f08 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.json.tpl @@ -0,0 +1,8 @@ +{ + "interface": { + "id": %(interface_id)s, + "address": "aa:aa:aa:aa:aa:aa", + "datapath_id": null, + "port_no": null + } +} diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.xml.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.xml.tpl new file mode 100644 index 000000000..e5d34f92b --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-add-interface-resp.xml.tpl @@ -0,0 +1,7 @@ +<?xml version="1.0" encoding="UTF-8"?> +<interface + id="%(interface_id)s" + address="aa:aa:aa:aa:aa:aa" + datapath_id="None" + port_no="None" +/> diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-req.json.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-req.json.tpl new file mode 100644 index 000000000..fd2ae101f --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-req.json.tpl @@ -0,0 +1,14 @@ +{ + "node": { + "service_host": "host", + "cpus": 8, + "memory_mb": 8192, + "local_gb": 128, + "pm_address": "10.1.2.3", + "pm_user": "pm_user", + "pm_password": "pm_pass", + "prov_mac_address": "12:34:56:78:90:ab", + "prov_vlan_id": 1234, + "terminal_port": 8000 + } +} diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-req.xml.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-req.xml.tpl new file mode 100644 index 000000000..78a2c1c74 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-req.xml.tpl @@ -0,0 +1,12 @@ +<?xml version="1.0" encoding="UTF-8"?> +<node + service_host="host" + cpus="8" + memory_mb="8192" + local_gb="128" + pm_address="10.1.2.3" + pm_user="pm_user" + prov_mac_address="12:34:56:78:90:ab" + prov_vlan_id="1234" + terminal_port="8000" +/> diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-resp.json.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-resp.json.tpl new file mode 100644 index 000000000..d3911b49d --- /dev/null +++ 
b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-resp.json.tpl @@ -0,0 +1,16 @@ +{ + "node": { + "service_host": "host", + "cpus": 8, + "memory_mb": 8192, + "local_gb": 128, + "pm_address": "10.1.2.3", + "pm_user": "pm_user", + "prov_mac_address": "12:34:56:78:90:ab", + "prov_vlan_id": 1234, + "terminal_port": 8000, + "instance_uuid": null, + "id": %(node_id)s, + "interfaces": [] + } +} diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-resp.xml.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-resp.xml.tpl new file mode 100644 index 000000000..f21d16a11 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-create-resp.xml.tpl @@ -0,0 +1,15 @@ +<?xml version="1.0" encoding="UTF-8"?> +<node + service_host="host" + cpus="8" + memory_mb="8192" + local_gb="128" + pm_address="10.1.2.3" + pm_user="pm_user" + prov_mac_address="12:34:56:78:90:ab" + prov_vlan_id="1234" + terminal_port="8000" + instance_uuid="None" + id="%(node_id)s"> + <interfaces/> +</node> diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json.tpl new file mode 100644 index 000000000..9b04a9cea --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-list-resp.json.tpl @@ -0,0 +1,21 @@ +{ + "nodes": [{ + "service_host": "host", + "cpus": 8, + "memory_mb": 8192, + "local_gb": 128, + "pm_address": "10.1.2.3", + "pm_user": "pm_user", + "prov_mac_address": "12:34:56:78:90:ab", + "prov_vlan_id": 1234, + "terminal_port": 8000, + "instance_uuid": null, + "id": %(node_id)s, + "interfaces": [{ + "id": %(interface_id)s, + "address": "%(address)s", + "datapath_id": null, + "port_no": null + }] + }] +} diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-list-resp.xml.tpl new file mode 100644 index 000000000..f17b6cc20 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-list-resp.xml.tpl @@ -0,0 +1,19 @@ +<?xml version="1.0" encoding="UTF-8"?> +<nodes> +<node + service_host="host" + cpus="8" + memory_mb="8192" + local_gb="128" + pm_address="10.1.2.3" + pm_user="pm_user" + prov_mac_address="12:34:56:78:90:ab" + prov_vlan_id="1234" + terminal_port="8000" + instance_uuid="None" + id="%(node_id)s"> + <interfaces> + <interface id="%(interface_id)s" address="%(address)s" datapath_id="None" port_no="None"/> + </interfaces> +</node> +</nodes> diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.json.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.json.tpl new file mode 100644 index 000000000..eb76a9140 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.json.tpl @@ -0,0 +1,5 @@ +{ + "remove_interface": { + "address": "%(address)s" + } +} diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.xml.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.xml.tpl new file mode 100644 index 000000000..089c94e86 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-remove-interface-req.xml.tpl @@ -0,0 +1,4 @@ +<?xml version="1.0" encoding="UTF-8"?> 
+<remove_interface + address="%(address)s" +/> diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-show-resp.json.tpl new file mode 100644 index 000000000..701b33d24 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-show-resp.json.tpl @@ -0,0 +1,21 @@ +{ + "node": { + "service_host": "host", + "cpus": 8, + "memory_mb": 8192, + "local_gb": 128, + "pm_address": "10.1.2.3", + "pm_user": "pm_user", + "prov_mac_address": "12:34:56:78:90:ab", + "prov_vlan_id": 1234, + "terminal_port": 8000, + "instance_uuid": null, + "id": %(node_id)s, + "interfaces": [{ + "id": %(interface_id)s, + "address": "%(address)s", + "datapath_id": null, + "port_no": null + }] + } +} diff --git a/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-show-resp.xml.tpl b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-show-resp.xml.tpl new file mode 100644 index 000000000..36e5568e5 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-baremetal-nodes/baremetal-node-show-resp.xml.tpl @@ -0,0 +1,17 @@ +<?xml version="1.0" encoding="UTF-8"?> +<node + service_host="host" + cpus="8" + memory_mb="8192" + local_gb="128" + pm_address="10.1.2.3" + pm_user="pm_user" + prov_mac_address="12:34:56:78:90:ab" + prov_vlan_id="1234" + terminal_port="8000" + instance_uuid="None" + id="%(node_id)s"> + <interfaces> + <interface id="%(interface_id)s" address="%(address)s" datapath_id="None" port_no="None"/> + </interfaces> +</node> diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index f101da243..887ca206f 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -43,6 +43,7 @@ from nova.openstack.common import timeutils import nova.quota from nova.scheduler import driver from nova import test +from nova.tests.baremetal.db import base as bm_db_base from nova.tests import fake_network from nova.tests.image import fake from nova.tests.integrated import integrated_helpers @@ -2589,3 +2590,75 @@ class CellsSampleJsonTest(ApiSampleTestBase): class CellsSampleXmlTest(CellsSampleJsonTest): ctype = 'xml' + + +class BareMetalNodesJsonTest(ApiSampleTestBase, bm_db_base.BMDBTestCase): + extension_name = ('nova.api.openstack.compute.contrib.baremetal_nodes.' 
+ 'Baremetal_nodes') + + def _create_node(self): + response = self._do_post("os-baremetal-nodes", + "baremetal-node-create-req", + {}) + self.assertEqual(response.status, 200) + subs = {'node_id': '(?P<id>\d+)'} + return self._verify_response("baremetal-node-create-resp", + subs, response) + + def test_create_node(self): + self._create_node() + + def test_list_nodes(self): + node_id = self._create_node() + interface_id = self._add_interface(node_id) + response = self._do_get('os-baremetal-nodes') + self.assertEqual(response.status, 200) + subs = {'node_id': node_id, + 'interface_id': interface_id, + 'address': 'aa:aa:aa:aa:aa:aa', + } + return self._verify_response('baremetal-node-list-resp', + subs, response) + + def test_show_node(self): + node_id = self._create_node() + interface_id = self._add_interface(node_id) + response = self._do_get('os-baremetal-nodes/%s' % node_id) + self.assertEqual(response.status, 200) + subs = {'node_id': node_id, + 'interface_id': interface_id, + 'address': 'aa:aa:aa:aa:aa:aa', + } + return self._verify_response('baremetal-node-show-resp', + subs, response) + + def test_delete_node(self): + node_id = self._create_node() + response = self._do_delete("os-baremetal-nodes/%s" % node_id) + self.assertEqual(response.status, 202) + + def _add_interface(self, node_id): + response = self._do_post("os-baremetal-nodes/%s/action" % node_id, + "baremetal-node-add-interface-req", + {'address': 'aa:aa:aa:aa:aa:aa'}) + self.assertEqual(response.status, 200) + subs = {'interface_id': r'(?P<id>\d+)'} + return self._verify_response("baremetal-node-add-interface-resp", + subs, response) + + def test_add_interface(self): + node_id = self._create_node() + self._add_interface(node_id) + + def test_remove_interface(self): + node_id = self._create_node() + self._add_interface(node_id) + response = self._do_post("os-baremetal-nodes/%s/action" % node_id, + "baremetal-node-remove-interface-req", + {'address': 'aa:aa:aa:aa:aa:aa'}) + self.assertEqual(response.status, 202) + self.assertEqual(response.read(), "") + + +class BareMetalNodesXmlTest(BareMetalNodesJsonTest): + ctype = 'xml' diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py index 5a82e0033..b2361b13c 100644 --- a/nova/tests/integrated/test_multiprocess_api.py +++ b/nova/tests/integrated/test_multiprocess_api.py @@ -150,7 +150,7 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase): workers = self._get_workers() LOG.info('workers: %r' % workers) - self.assertFalse(workers, 'No OS processes left.') + self.assertFalse(workers, 'OS processes left %r' % workers) def test_terminate_sigkill(self): self._terminate_with_signal(signal.SIGKILL) diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index b5b3ec107..9ccdffab5 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -185,6 +185,7 @@ class FlatNetworkTestCase(test.TestCase): 'vif_devname': None, 'vif_uuid': '00000000-0000-0000-0000-00000000000000%02d' % nid, + 'ovs_interfaceid': None, 'should_create_vlan': False, 'should_create_bridge': False} self.assertThat(info, matchers.DictMatches(check)) diff --git a/nova/tests/ssl_cert/ca.crt b/nova/tests/ssl_cert/ca.crt new file mode 100644 index 000000000..9d66ca627 --- /dev/null +++ b/nova/tests/ssl_cert/ca.crt @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGDDCCA/SgAwIBAgIJAPSvwQYk4qI4MA0GCSqGSIb3DQEBBQUAMGExCzAJBgNV +BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMRUwEwYDVQQKEwxPcGVuc3RhY2sg 
+Q0ExEjAQBgNVBAsTCUdsYW5jZSBDQTESMBAGA1UEAxMJR2xhbmNlIENBMB4XDTEy +MDIwOTE3MTAwMloXDTIyMDIwNjE3MTAwMlowYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDmf+fapWfzy1Uylus0KGalw4X/5xZ+ltPVOr+IdCPbstvi +RTC5g+O+TvXeOP32V/cnSY4ho/+f2q730za+ZA/cgWO252rcm3Q7KTJn3PoqzJvX +/l3EXe3/TCrbzgZ7lW3QLTCTEE2eEzwYG3wfDTOyoBq+F6ct6ADh+86gmpbIRfYI +N+ixB0hVyz9427PTof97fL7qxxkjAayB28OfwHrkEBl7iblNhUC0RoH+/H9r5GEl +GnWiebxfNrONEHug6PHgiaGq7/Dj+u9bwr7J3/NoS84I08ajMnhlPZxZ8bS/O8If +ceWGZv7clPozyhABT/otDfgVcNH1UdZ4zLlQwc1MuPYN7CwxrElxc8Quf94ttGjb +tfGTl4RTXkDofYdG1qBWW962PsGl2tWmbYDXV0q5JhV/IwbrE1X9f+OksJQne1/+ +dZDxMhdf2Q1V0P9hZZICu4+YhmTMs5Mc9myKVnzp4NYdX5fXoB/uNYph+G7xG5IK +WLSODKhr1wFGTTcuaa8LhOH5UREVenGDJuc6DdgX9a9PzyJGIi2ngQ03TJIkCiU/ +4J/r/vsm81ezDiYZSp2j5JbME+ixW0GBLTUWpOIxUSHgUFwH5f7lQwbXWBOgwXQk +BwpZTmdQx09MfalhBtWeu4/6BnOCOj7e/4+4J0eVxXST0AmVyv8YjJ2nz1F9oQID +AQABo4HGMIHDMB0GA1UdDgQWBBTk7Krj4bEsTjHXaWEtI2GZ5ACQyTCBkwYDVR0j +BIGLMIGIgBTk7Krj4bEsTjHXaWEtI2GZ5ACQyaFlpGMwYTELMAkGA1UEBhMCQVUx +EzARBgNVBAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAG +A1UECxMJR2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0GCCQD0r8EGJOKiODAM +BgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4ICAQA8Zrss/MiwFHGmDlercE0h +UvzA54n/EvKP9nP3jHM2qW/VPfKdnFw99nEPFLhb+lN553vdjOpCYFm+sW0Z5Mi4 +qsFkk4AmXIIEFOPt6zKxMioLYDQ9Sw/BUv6EZGeANWr/bhmaE+dMcKJt5le/0jJm +2ahsVB9fbFu9jBFeYb7Ba/x2aLkEGMxaDLla+6EQhj148fTnS1wjmX9G2cNzJvj/ ++C2EfKJIuDJDqw2oS2FGVpP37FA2Bz2vga0QatNneLkGKCFI3ZTenBznoN+fmurX +TL3eJE4IFNrANCcdfMpdyLAtXz4KpjcehqpZMu70er3d30zbi1l0Ajz4dU+WKz/a +NQES+vMkT2wqjXHVTjrNwodxw3oLK/EuTgwoxIHJuplx5E5Wrdx9g7Gl1PBIJL8V +xiOYS5N7CakyALvdhP7cPubA2+TPAjNInxiAcmhdASS/Vrmpvrkat6XhGn8h9liv +ysDOpMQmYQkmgZBpW8yBKK7JABGGsJADJ3E6J5MMWBX2RR4kFoqVGAzdOU3oyaTy +I0kz5sfuahaWpdYJVlkO+esc0CRXw8fLDYivabK2tOgUEWeZsZGZ9uK6aV1VxTAY +9Guu3BJ4Rv/KP/hk7mP8rIeCwotV66/2H8nq72ImQhzSVyWcxbFf2rJiFQJ3BFwA +WoRMgEwjGJWqzhJZUYpUAQ== +-----END CERTIFICATE----- diff --git a/nova/tests/ssl_cert/certificate.crt b/nova/tests/ssl_cert/certificate.crt new file mode 100644 index 000000000..3c1aa6363 --- /dev/null +++ b/nova/tests/ssl_cert/certificate.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFLjCCAxYCAQEwDQYJKoZIhvcNAQEFBQAwYTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClNvbWUtU3RhdGUxFTATBgNVBAoTDE9wZW5zdGFjayBDQTESMBAGA1UECxMJ +R2xhbmNlIENBMRIwEAYDVQQDEwlHbGFuY2UgQ0EwHhcNMTIwMjA5MTcxMDUzWhcN +MjIwMjA2MTcxMDUzWjBZMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0 +ZTESMBAGA1UEChMJT3BlbnN0YWNrMQ8wDQYDVQQLEwZHbGFuY2UxEDAOBgNVBAMT +BzAuMC4wLjAwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXpUkQN6pu +avo+gz3o1K4krVdPl1m7NjNJDyD/+ZH0EGNcEN7iag1qPE7JsjqGPNZsQK1dMoXb +Sz+OSi9qvNeJnBcfwUx5qTAtwyAb9AxGkwuMafIU+lWbsclo+dPGsja01ywbXTCZ +bF32iqnpOMYhfxWUdoQYiBkhxxhW9eMPKLS/KkP8/bx+Vaa2XJiAebqkd9nrksAA +BeGc9mlafYBEmiChPdJEPw+1ePA4QVq9aPepDsqAKtGN8JLpmoC3BdxQQTbbwL3Q +8fTXK4tCNUaVk4AbDy/McFq6y0ocQoBPJjihOY35mWG/OLtcI99yPOpWGnps/5aG +/64DDJ2D67Fnaj6gKHV+6TXFO8KZxlnxtgtiZDJBZkneTBt9ArSOv+l6NBsumRz0 +iEJ4o4H1S2TSMnprAvX7WnGtc6Xi9gXahYcDHEelwwYzqAiTBv6hxSp4MZ2dNXa+ +KzOitC7ZbV2qsg0au0wjfE/oSQ3NvsvUr8nOmfutJTvHRAwbC1v4G/tuAsO7O0w2 +0u2B3u+pG06m5+rnEqp+rB9hmukRYTfgEFRRsVIvpFl/cwvPXKRcX03UIMx+lLr9 +Ft+ep7YooBhY3wY2kwCxD4lRYNmbwsCIVywZt40f/4ad98TkufR9NhsfycxGeqbr +mTMFlZ8TTlmP82iohekKCOvoyEuTIWL2+wIDAQABMA0GCSqGSIb3DQEBBQUAA4IC +AQBMUBgV0R+Qltf4Du7u/8IFmGAoKR/mktB7R1gRRAqsvecUt7kIwBexGdavGg1y +0pU0+lgUZjJ20N1SlPD8gkNHfXE1fL6fmMjWz4dtYJjzRVhpufHPeBW4tl8DgHPN 
+rBGAYQ+drDSXaEjiPQifuzKx8WS+DGA3ki4co5mPjVnVH1xvLIdFsk89z3b3YD1k +yCJ/a9K36x6Z/c67JK7s6MWtrdRF9+MVnRKJ2PK4xznd1kBz16V+RA466wBDdARY +vFbtkafbEqOb96QTonIZB7+fAldKDPZYnwPqasreLmaGOaM8sxtlPYAJ5bjDONbc +AaXG8BMRQyO4FyH237otDKlxPyHOFV66BaffF5S8OlwIMiZoIvq+IcTZOdtDUSW2 +KHNLfe5QEDZdKjWCBrfqAfvNuG13m03WqfmcMHl3o/KiPJlx8l9Z4QEzZ9xcyQGL +cncgeHM9wJtzi2cD/rTDNFsx/gxvoyutRmno7I3NRbKmpsXF4StZioU3USRspB07 +hYXOVnG3pS+PjVby7ThT3gvFHSocguOsxClx1epdUJAmJUbmM7NmOp5WVBVtMtC2 +Su4NG/xJciXitKzw+btb7C7RjO6OEqv/1X/oBDzKBWQAwxUC+lqmnM7W6oqWJFEM +YfTLnrjs7Hj6ThMGcEnfvc46dWK3dz0RjsQzUxugPuEkLA== +-----END CERTIFICATE----- diff --git a/nova/tests/ssl_cert/privatekey.key b/nova/tests/ssl_cert/privatekey.key new file mode 100644 index 000000000..b63df3d29 --- /dev/null +++ b/nova/tests/ssl_cert/privatekey.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEA16VJEDeqbmr6PoM96NSuJK1XT5dZuzYzSQ8g//mR9BBjXBDe +4moNajxOybI6hjzWbECtXTKF20s/jkovarzXiZwXH8FMeakwLcMgG/QMRpMLjGny +FPpVm7HJaPnTxrI2tNcsG10wmWxd9oqp6TjGIX8VlHaEGIgZIccYVvXjDyi0vypD +/P28flWmtlyYgHm6pHfZ65LAAAXhnPZpWn2ARJogoT3SRD8PtXjwOEFavWj3qQ7K +gCrRjfCS6ZqAtwXcUEE228C90PH01yuLQjVGlZOAGw8vzHBaustKHEKATyY4oTmN ++Zlhvzi7XCPfcjzqVhp6bP+Whv+uAwydg+uxZ2o+oCh1fuk1xTvCmcZZ8bYLYmQy +QWZJ3kwbfQK0jr/pejQbLpkc9IhCeKOB9Utk0jJ6awL1+1pxrXOl4vYF2oWHAxxH +pcMGM6gIkwb+ocUqeDGdnTV2viszorQu2W1dqrINGrtMI3xP6EkNzb7L1K/Jzpn7 +rSU7x0QMGwtb+Bv7bgLDuztMNtLtgd7vqRtOpufq5xKqfqwfYZrpEWE34BBUUbFS +L6RZf3MLz1ykXF9N1CDMfpS6/Rbfnqe2KKAYWN8GNpMAsQ+JUWDZm8LAiFcsGbeN +H/+GnffE5Ln0fTYbH8nMRnqm65kzBZWfE05Zj/NoqIXpCgjr6MhLkyFi9vsCAwEA +AQKCAgAA96baQcWr9SLmQOR4NOwLEhQAMWefpWCZhU3amB4FgEVR1mmJjnw868RW +t0v36jH0Dl44us9K6o2Ab+jCi9JTtbWM2Osk6JNkwSlVtsSPVH2KxbbmTTExH50N +sYE3tPj12rlB7isXpRrOzlRwzWZmJBHOtrFlAsdKFYCQc03vdXlKGkBv1BuSXYP/ +8W5ltSYXMspxehkOZvhaIejbFREMPbzDvGlDER1a7Q320qQ7kUr7ISvbY1XJUzj1 +f1HwgEA6w/AhED5Jv6wfgvx+8Yo9hYnflTPbsO1XRS4x7kJxGHTMlFuEsSF1ICYH +Bcos0wUiGcBO2N6uAFuhe98BBn+nOwAPZYWwGkmVuK2psm2mXAHx94GT/XqgK/1r +VWGSoOV7Fhjauc2Nv8/vJU18DXT3OY5hc4iXVeEBkuZwRb/NVUtnFoHxVO/Mp5Fh +/W5KZaLWVrLghzvSQ/KUIM0k4lfKDZpY9ZpOdNgWDyZY8tNrXumUZZimzWdXZ9vR +dBssmd8qEKs1AHGFnMDt56IjLGou6j0qnWsLdR1e/WEFsYzGXLVHCv6vXRNkbjqh +WFw5nA+2Dw1YAsy+YkTfgx2pOe+exM/wxsVPa7tG9oZ374dywUi1k6VoHw5dkmJw +1hbXqSLZtx2N51G+SpGmNAV4vLUF0y3dy2wnrzFkFT4uxh1w8QKCAQEA+h6LwHTK +hgcJx6CQQ6zYRqXo4wdvMooY1FcqJOq7LvJUA2CX5OOLs8qN1TyFrOCuAUTurOrM +ABlQ0FpsIaP8TOGz72dHe2eLB+dD6Bqjn10sEFMn54zWd/w9ympQrO9jb5X3ViTh +sCcdYyXVS9Hz8nzbbIF+DaKlxF2Hh71uRDxXpMPxRcGbOIuKZXUj6RkTIulzqT6o +uawlegWxch05QSgzq/1ASxtjTzo4iuDCAii3N45xqxnB+fV9NXEt4R2oOGquBRPJ +LxKcOnaQKBD0YNX4muTq+zPlv/kOb8/ys2WGWDUrNkpyJXqhTve4KONjqM7+iL/U +4WdJuiCjonzk/QKCAQEA3Lc+kNq35FNLxMcnCVcUgkmiCWZ4dyGZZPdqjOPww1+n +bbudGPzY1nxOvE60dZM4or/tm6qlXYfb2UU3+OOJrK9s297EQybZ8DTZu2GHyitc +NSFV3Gl4cgvKdbieGKkk9X2dV9xSNesNvX9lJEnQxuwHDTeo8ubLHtV88Ml1xokn +7W+IFiyEuUIL4e5/fadbrI3EwMrbCF4+9VcfABx4PTNMzdc8LsncCMXE+jFX8AWp +TsT2JezTe5o2WpvBoKMAYhJQNQiaWATn00pDVY/70H1vK3ljomAa1IUdOr/AhAF7 +3jL0MYMgXSHzXZOKAtc7yf+QfFWF1Ls8+sen1clJVwKCAQEAp59rB0r+Iz56RmgL +5t7ifs5XujbURemY5E2aN+18DuVmenD0uvfoO1DnJt4NtCNLWhxpXEdq+jH9H/VJ +fG4a+ydT4IC1vjVRTrWlo9qeh4H4suQX3S1c2kKY4pvHf25blH/Lp9bFzbkZD8Ze +IRcOxxb4MsrBwL+dGnGYD9dbG63ZCtoqSxaKQSX7VS1hKKmeUopj8ivFBdIht5oz +JogBQ/J+Vqg9u1gagRFCrYgdXTcOOtRix0lW336vL+6u0ax/fXe5MjvlW3+8Zc3p +pIBgVrlvh9ccx8crFTIDg9m4DJRgqaLQV+0ifI2np3WK3RQvSQWYPetZ7sm69ltD +bvUGvQKCAQAz5CEhjUqOs8asjOXwnDiGKSmfbCgGWi/mPQUf+rcwN9z1P5a/uTKB +utgIDbj/q401Nkp2vrgCNV7KxitSqKxFnTjKuKUL5KZ4gvRtyZBTR751/1BgcauP +pJYE91K0GZBG5zGG5pWtd4XTd5Af5/rdycAeq2ddNEWtCiRFuBeohbaNbBtimzTZ 
+GV4R0DDJKf+zoeEQMqEsZnwG0mTHceoS+WylOGU92teQeG7HI7K5C5uymTwFzpgq +ByegRd5QFgKRDB0vWsZuyzh1xI/wHdnmOpdYcUGre0zTijhFB7ALWQ32P6SJv3ps +av78kSNxZ4j3BM7DbJf6W8sKasZazOghAoIBAHekpBcLq9gRv2+NfLYxWN2sTZVB +1ldwioG7rWvk5YQR2akukecI3NRjtC5gG2vverawG852Y4+oLfgRMHxgp0qNStwX +juTykzPkCwZn8AyR+avC3mkrtJyM3IigcYOu4/UoaRDFa0xvCC1EfumpnKXIpHag +miSQZf2sVbgqb3/LWvHIg/ceOP9oGJve87/HVfQtBoLaIe5RXCWkqB7mcI/exvTS +8ShaW6v2Fe5Bzdvawj7sbsVYRWe93Aq2tmIgSX320D2RVepb6mjD4nr0IUaM3Yed +TFT7e2ikWXyDLLgVkDTU4Qe8fr3ZKGfanCIDzvgNw6H1gRi+2WQgOmjilMQ= +-----END RSA PRIVATE KEY----- diff --git a/nova/tests/test_driver.py b/nova/tests/test_driver.py new file mode 100644 index 000000000..2dee7725f --- /dev/null +++ b/nova/tests/test_driver.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2013 Citrix Systems, Inc. +# Copyright 2013 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import test +from nova.virt import driver + + +class FakeDriver(object): + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + + +class FakeDriver2(FakeDriver): + pass + + +class ToDriverRegistryTestCase(test.TestCase): + + def assertDriverInstance(self, inst, class_, *args, **kwargs): + self.assertEquals(class_, inst.__class__) + self.assertEquals(args, inst.args) + self.assertEquals(kwargs, inst.kwargs) + + def test_driver_dict_from_config(self): + drvs = driver.driver_dict_from_config( + [ + 'key1=nova.tests.test_driver.FakeDriver', + 'key2=nova.tests.test_driver.FakeDriver2', + ], 'arg1', 'arg2', param1='value1', param2='value2' + ) + + self.assertEquals( + sorted(['key1', 'key2']), + sorted(drvs.keys()) + ) + + self.assertDriverInstance( + drvs['key1'], + FakeDriver, 'arg1', 'arg2', param1='value1', + param2='value2') + + self.assertDriverInstance( + drvs['key2'], + FakeDriver2, 'arg1', 'arg2', param1='value1', + param2='value2') diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py index a9865cb44..495e7c947 100644 --- a/nova/tests/test_imagebackend.py +++ b/nova/tests/test_imagebackend.py @@ -273,7 +273,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase): cmd = ('dd', 'if=%s' % self.TEMPLATE_PATH, 'of=%s' % self.PATH, 'bs=4M') self.utils.execute(*cmd, run_as_root=True) - self.disk.resize2fs(self.PATH) + self.disk.resize2fs(self.PATH, run_as_root=True) self.mox.ReplayAll() image = self.image_class(self.INSTANCE, self.NAME) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 75e758cde..f96536893 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -4747,6 +4747,25 @@ class LibvirtDriverTestCase(test.TestCase): self.libvirtconnection._cleanup_resize(ins_ref, _fake_network_info(self.stubs, 1)) + def test_get_instance_disk_info_exception(self): + instance_name = "fake-instance-name" + + class FakeExceptionDomain(FakeVirtDomain): + def __init__(self): + super(FakeExceptionDomain, self).__init__() + + def XMLDesc(self, *args): + raise libvirt.libvirtError("Libvirt error") + + def 
fake_lookup_by_name(instance_name): + return FakeExceptionDomain() + + self.stubs.Set(self.libvirtconnection, '_lookup_by_name', + fake_lookup_by_name) + self.assertRaises(exception.InstanceNotFound, + self.libvirtconnection.get_instance_disk_info, + instance_name) + class LibvirtVolumeUsageTestCase(test.TestCase): """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py index 11ffa020f..3861d7dfa 100644 --- a/nova/tests/test_libvirt_vif.py +++ b/nova/tests/test_libvirt_vif.py @@ -27,7 +27,7 @@ CONF = cfg.CONF class LibvirtVifTestCase(test.TestCase): - net = { + net_bridge = { 'cidr': '101.168.1.0/24', 'cidr_v6': '101:1db9::/64', 'gateway_v6': '101:1db9::1', @@ -42,15 +42,39 @@ class LibvirtVifTestCase(test.TestCase): 'id': 'network-id-xxx-yyy-zzz' } - mapping = { + mapping_bridge = { 'mac': 'ca:fe:de:ad:be:ef', - 'gateway_v6': net['gateway_v6'], + 'gateway_v6': net_bridge['gateway_v6'], 'ips': [{'ip': '101.168.1.9'}], 'dhcp_server': '191.168.1.1', 'vif_uuid': 'vif-xxx-yyy-zzz', 'vif_devname': 'tap-xxx-yyy-zzz' } + net_ovs = { + 'cidr': '101.168.1.0/24', + 'cidr_v6': '101:1db9::/64', + 'gateway_v6': '101:1db9::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'br0', + 'vlan': 99, + 'gateway': '101.168.1.1', + 'broadcast': '101.168.1.255', + 'dns1': '8.8.8.8', + 'id': 'network-id-xxx-yyy-zzz' + } + + mapping_ovs = { + 'mac': 'ca:fe:de:ad:be:ef', + 'gateway_v6': net_ovs['gateway_v6'], + 'ips': [{'ip': '101.168.1.9'}], + 'dhcp_server': '191.168.1.1', + 'vif_uuid': 'vif-xxx-yyy-zzz', + 'vif_devname': 'tap-xxx-yyy-zzz', + 'ovs_interfaceid': 'aaa-bbb-ccc', + } + instance = { 'name': 'instance-name', 'uuid': 'instance-uuid' @@ -67,7 +91,7 @@ class LibvirtVifTestCase(test.TestCase): self.stubs.Set(utils, 'execute', fake_execute) - def _get_instance_xml(self, driver): + def _get_instance_xml(self, driver, net, mapping): conf = vconfig.LibvirtConfigGuest() conf.virt_type = "qemu" conf.name = "fake-name" @@ -75,7 +99,7 @@ class LibvirtVifTestCase(test.TestCase): conf.memory = 100 * 1024 conf.vcpus = 4 - nic = driver.get_config(self.instance, self.net, self.mapping) + nic = driver.get_config(self.instance, net, mapping) conf.add_device(nic) return conf.to_xml() @@ -126,7 +150,9 @@ class LibvirtVifTestCase(test.TestCase): libvirt_type='kvm') d = vif.LibvirtBridgeDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_bridge, + self.mapping_bridge) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -143,7 +169,9 @@ class LibvirtVifTestCase(test.TestCase): libvirt_type='kvm') d = vif.LibvirtBridgeDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_bridge, + self.mapping_bridge) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -160,7 +188,9 @@ class LibvirtVifTestCase(test.TestCase): libvirt_type='qemu') d = vif.LibvirtBridgeDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_bridge, + self.mapping_bridge) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -177,7 +207,9 @@ class LibvirtVifTestCase(test.TestCase): libvirt_type='xen') d = vif.LibvirtBridgeDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_bridge, + self.mapping_bridge) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -191,7 +223,9 @@ class LibvirtVifTestCase(test.TestCase): def test_bridge_driver(self): d = 
vif.LibvirtBridgeDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_bridge, + self.mapping_bridge) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -199,13 +233,15 @@ class LibvirtVifTestCase(test.TestCase): node = ret[0] self.assertEqual(node.get("type"), "bridge") br_name = node.find("source").get("bridge") - self.assertEqual(br_name, self.net['bridge']) + self.assertEqual(br_name, self.net_bridge['bridge']) mac = node.find("mac").get("address") - self.assertEqual(mac, self.mapping['mac']) + self.assertEqual(mac, self.mapping_bridge['mac']) def test_ovs_ethernet_driver(self): d = vif.LibvirtOpenVswitchDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_ovs, + self.mapping_ovs) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -215,13 +251,15 @@ class LibvirtVifTestCase(test.TestCase): dev_name = node.find("target").get("dev") self.assertTrue(dev_name.startswith("tap")) mac = node.find("mac").get("address") - self.assertEqual(mac, self.mapping['mac']) + self.assertEqual(mac, self.mapping_ovs['mac']) script = node.find("script").get("path") self.assertEquals(script, "") def test_ovs_virtualport_driver(self): d = vif.LibvirtOpenVswitchVirtualPortDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_ovs, + self.mapping_ovs) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -232,21 +270,24 @@ class LibvirtVifTestCase(test.TestCase): br_name = node.find("source").get("bridge") self.assertEqual(br_name, "br0") mac = node.find("mac").get("address") - self.assertEqual(mac, self.mapping['mac']) + self.assertEqual(mac, self.mapping_ovs['mac']) vp = node.find("virtualport") self.assertEqual(vp.get("type"), "openvswitch") iface_id_found = False for p_elem in vp.findall("parameters"): iface_id = p_elem.get("interfaceid", None) if iface_id: - self.assertEqual(iface_id, self.mapping['vif_uuid']) + self.assertEqual(iface_id, + self.mapping_ovs['ovs_interfaceid']) iface_id_found = True self.assertTrue(iface_id_found) def test_quantum_bridge_ethernet_driver(self): d = vif.QuantumLinuxBridgeVIFDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_bridge, + self.mapping_bridge) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -256,13 +297,15 @@ class LibvirtVifTestCase(test.TestCase): dev_name = node.find("target").get("dev") self.assertTrue(dev_name.startswith("tap")) mac = node.find("mac").get("address") - self.assertEqual(mac, self.mapping['mac']) + self.assertEqual(mac, self.mapping_ovs['mac']) br_name = node.find("source").get("bridge") self.assertEqual(br_name, "br0") def test_quantum_hybrid_driver(self): d = vif.LibvirtHybridOVSBridgeDriver() - xml = self._get_instance_xml(d) + xml = self._get_instance_xml(d, + self.net_ovs, + self.mapping_ovs) doc = etree.fromstring(xml) ret = doc.findall('./devices/interface') @@ -270,6 +313,6 @@ class LibvirtVifTestCase(test.TestCase): node = ret[0] self.assertEqual(node.get("type"), "bridge") br_name = node.find("source").get("bridge") - self.assertEqual(br_name, self.net['bridge']) + self.assertEqual(br_name, self.net_ovs['bridge']) mac = node.find("mac").get("address") - self.assertEqual(mac, self.mapping['mac']) + self.assertEqual(mac, self.mapping_ovs['mac']) diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py index a6c150971..f0ed0a863 100644 --- a/nova/tests/test_migrations.py +++ 
b/nova/tests/test_migrations.py @@ -136,12 +136,6 @@ class TestMigrations(test.TestCase): # and recreate it, which ensures that we have no side-effects # from the tests self._reset_databases() - - # remove these from the list so they aren't used in the migration tests - if "mysqlcitest" in self.engines: - del self.engines["mysqlcitest"] - if "mysqlcitest" in self.test_databases: - del self.test_databases["mysqlcitest"] super(TestMigrations, self).tearDown() def _reset_databases(self): @@ -490,3 +484,77 @@ class TestMigrations(test.TestCase): migration_api.downgrade(engine, TestMigrations.REPOSITORY, 146) _146_check() + + def test_migration_152(self): + host1 = 'compute-host1' + host2 = 'compute-host2' + + def _151_check(services, volumes): + service = services.select(services.c.id == 1).execute().first() + self.assertEqual(False, service.deleted) + service = services.select(services.c.id == 2).execute().first() + self.assertEqual(True, service.deleted) + + volume = volumes.select(volumes.c.id == "first").execute().first() + self.assertEqual(False, volume.deleted) + volume = volumes.select(volumes.c.id == "second").execute().first() + self.assertEqual(True, volume.deleted) + + for key, engine in self.engines.items(): + migration_api.version_control(engine, TestMigrations.REPOSITORY, + migration.INIT_VERSION) + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 151) + metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + + # NOTE(boris-42): It is enough to test one table with type of `id` + # column Integer and one with type String. + services = sqlalchemy.Table('services', metadata, autoload=True) + volumes = sqlalchemy.Table('volumes', metadata, autoload=True) + + engine.execute( + services.insert(), + [ + {'id': 1, 'host': host1, 'binary': 'nova-compute', + 'report_count': 0, 'topic': 'compute', 'deleted': False}, + {'id': 2, 'host': host1, 'binary': 'nova-compute', + 'report_count': 0, 'topic': 'compute', 'deleted': True} + ] + ) + + engine.execute( + volumes.insert(), + [ + {'id': 'first', 'host': host1, 'deleted': False}, + {'id': 'second', 'host': host2, 'deleted': True} + ] + ) + + _151_check(services, volumes) + + migration_api.upgrade(engine, TestMigrations.REPOSITORY, 152) + # NOTE(boris-42): One more time get from DB info about tables. + metadata2 = sqlalchemy.schema.MetaData() + metadata2.bind = engine + + services = sqlalchemy.Table('services', metadata2, autoload=True) + + service = services.select(services.c.id == 1).execute().first() + self.assertEqual(0, service.deleted) + service = services.select(services.c.id == 2).execute().first() + self.assertEqual(service.id, service.deleted) + + volumes = sqlalchemy.Table('volumes', metadata2, autoload=True) + volume = volumes.select(volumes.c.id == "first").execute().first() + self.assertEqual("", volume.deleted) + volume = volumes.select(volumes.c.id == "second").execute().first() + self.assertEqual(volume.id, volume.deleted) + + migration_api.downgrade(engine, TestMigrations.REPOSITORY, 151) + # NOTE(boris-42): One more time get from DB info about tables. 
+ metadata = sqlalchemy.schema.MetaData() + metadata.bind = engine + services = sqlalchemy.Table('services', metadata, autoload=True) + volumes = sqlalchemy.Table('volumes', metadata, autoload=True) + + _151_check(services, volumes) diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py index b4b25ed97..b04bc3e03 100644 --- a/nova/tests/test_wsgi.py +++ b/nova/tests/test_wsgi.py @@ -21,9 +21,17 @@ import os.path import tempfile +import eventlet + import nova.exception from nova import test import nova.wsgi +import urllib2 +import webob + +SSL_CERT_DIR = os.path.normpath(os.path.join( + os.path.dirname(os.path.abspath(__file__)), + 'ssl_cert')) class TestLoaderNothingExists(test.TestCase): @@ -99,3 +107,92 @@ class TestWSGIServer(test.TestCase): self.assertNotEqual(0, server.port) server.stop() server.wait() + + +class TestWSGIServerWithSSL(test.TestCase): + """WSGI server with SSL tests.""" + + def setUp(self): + super(TestWSGIServerWithSSL, self).setUp() + self.flags(enabled_ssl_apis=['fake_ssl'], + ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'), + ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key')) + + def test_ssl_server(self): + + def test_app(env, start_response): + start_response('200 OK', {}) + return ['PONG'] + + fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app, + host="127.0.0.1", port=0, + use_ssl=True) + fake_ssl_server.start() + self.assertNotEqual(0, fake_ssl_server.port) + + cli = eventlet.connect(("localhost", fake_ssl_server.port)) + cli = eventlet.wrap_ssl(cli, + ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt')) + + cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nContent-length:4\r\n\r\nPING') + response = cli.read(8192) + self.assertEquals(response[-4:], "PONG") + + fake_ssl_server.stop() + fake_ssl_server.wait() + + def test_two_servers(self): + + def test_app(env, start_response): + start_response('200 OK', {}) + return ['PONG'] + + fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app, + host="127.0.0.1", port=0, use_ssl=True) + fake_ssl_server.start() + self.assertNotEqual(0, fake_ssl_server.port) + + fake_server = nova.wsgi.Server("fake", test_app, + host="127.0.0.1", port=0) + fake_server.start() + self.assertNotEquals(0, fake_server.port) + + cli = eventlet.connect(("localhost", fake_ssl_server.port)) + cli = eventlet.wrap_ssl(cli, + ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt')) + + cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nContent-length:4\r\n\r\nPING') + response = cli.read(8192) + self.assertEquals(response[-4:], "PONG") + + cli = eventlet.connect(("localhost", fake_server.port)) + + cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n' + 'Connection: close\r\nContent-length:4\r\n\r\nPING') + response = cli.recv(8192) + self.assertEquals(response[-4:], "PONG") + + fake_ssl_server.stop() + fake_ssl_server.wait() + + def test_app_using_ipv6_and_ssl(self): + greetings = 'Hello, World!!!' 
+ + @webob.dec.wsgify + def hello_world(req): + return greetings + + server = nova.wsgi.Server("fake_ssl", + hello_world, + host="::1", + port=0, + use_ssl=True) + server.start() + + response = urllib2.urlopen('https://[::1]:%d/' % server.port) + self.assertEquals(greetings, response.read()) + + server.stop() + server.wait() diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py index 7cc5c70da..844ae8459 100644 --- a/nova/tests/virt/xenapi/test_volumeops.py +++ b/nova/tests/virt/xenapi/test_volumeops.py @@ -21,6 +21,13 @@ from nova.virt.xenapi import volumeops class VolumeAttachTestCase(test.TestCase): def test_detach_volume_call(self): + registered_calls = [] + + def regcall(label): + def side_effect(*args, **kwargs): + registered_calls.append(label) + return side_effect + ops = volumeops.VolumeOps('session') self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise') self.mox.StubOutWithMock(volumeops.vm_utils, 'find_vbd_by_number') @@ -45,10 +52,12 @@ class VolumeAttachTestCase(test.TestCase): volumeops.vm_utils.unplug_vbd('session', 'vbdref') - volumeops.vm_utils.destroy_vbd('session', 'vbdref') + volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects( + regcall('destroy_vbd')) volumeops.volume_utils.find_sr_from_vbd( - 'session', 'vbdref').AndReturn('srref') + 'session', 'vbdref').WithSideEffects( + regcall('find_sr_from_vbd')).AndReturn('srref') volumeops.volume_utils.purge_sr('session', 'srref') @@ -58,6 +67,9 @@ class VolumeAttachTestCase(test.TestCase): dict(driver_volume_type='iscsi', data='conn_data'), 'instance_1', 'mountpoint') + self.assertEquals( + ['find_sr_from_vbd', 'destroy_vbd'], registered_calls) + def test_attach_volume_call(self): ops = volumeops.VolumeOps('session') self.mox.StubOutWithMock(ops, '_connect_volume') diff --git a/nova/utils.py b/nova/utils.py index 75cba0a7c..f9e08fd80 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -507,14 +507,18 @@ def str_dict_replace(s, mapping): class LazyPluggable(object): """A pluggable backend loaded lazily based on some value.""" - def __init__(self, pivot, **backends): + def __init__(self, pivot, config_group=None, **backends): self.__backends = backends self.__pivot = pivot self.__backend = None + self.__config_group = config_group def __get_backend(self): if not self.__backend: - backend_name = CONF[self.__pivot] + if self.__config_group is None: + backend_name = CONF[self.__pivot] + else: + backend_name = CONF[self.__config_group][self.__pivot] if backend_name not in self.__backends: msg = _('Invalid backend: %s') % backend_name raise exception.NovaException(msg) diff --git a/nova/virt/baremetal/db/api.py b/nova/virt/baremetal/db/api.py index 206a59b4f..002425333 100644 --- a/nova/virt/baremetal/db/api.py +++ b/nova/virt/baremetal/db/api.py @@ -50,16 +50,21 @@ from nova import utils # because utils.LazyPluggable doesn't support reading from # option groups. See bug #1093043. 
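The nova/utils.py hunk earlier in this change teaches LazyPluggable to resolve its pivot option from a named option group, which is the limitation the context comment above refers to (bug #1093043) and what lets the bare-metal backend option move out of DEFAULT and into a [baremetal] section below. A minimal, self-contained sketch of that grouped lookup; plain dicts stand in for CONF, the class name is illustrative, and the real LazyPluggable lazily imports the chosen backend module rather than returning its path:

# Simplified stand-in for the grouped lookup LazyPluggable performs once a
# config_group is supplied; real code resolves CONF[group][pivot] and then
# imports the backend module named by the matching keyword argument.
class GroupedBackend(object):
    def __init__(self, pivot, config_group=None, **backends):
        self._backends = backends
        self._pivot = pivot
        self._config_group = config_group

    def resolve(self, conf):
        if self._config_group is None:
            name = conf[self._pivot]
        else:
            name = conf[self._config_group][self._pivot]
        if name not in self._backends:
            raise ValueError('Invalid backend: %s' % name)
        return self._backends[name]

conf = {'baremetal': {'db_backend': 'sqlalchemy'}}
impl = GroupedBackend('db_backend', config_group='baremetal',
                      sqlalchemy='nova.virt.baremetal.db.sqlalchemy.api')
print(impl.resolve(conf))  # -> nova.virt.baremetal.db.sqlalchemy.api

Scoping the pivot with config_group='baremetal' is also what allows the option itself to drop its baremetal_ prefix in the hunk that follows.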
db_opts = [ - cfg.StrOpt('baremetal_db_backend', + cfg.StrOpt('db_backend', default='sqlalchemy', - help='The backend to use for db'), + help='The backend to use for bare-metal database'), ] +baremetal_group = cfg.OptGroup(name='baremetal', + title='Baremetal Options') + CONF = cfg.CONF -CONF.register_opts(db_opts) +CONF.register_group(baremetal_group) +CONF.register_opts(db_opts, baremetal_group) IMPL = utils.LazyPluggable( - 'baremetal_db_backend', + 'db_backend', + config_group='baremetal', sqlalchemy='nova.virt.baremetal.db.sqlalchemy.api') diff --git a/nova/virt/baremetal/db/migration.py b/nova/virt/baremetal/db/migration.py index 40631bf45..d630ccf65 100644 --- a/nova/virt/baremetal/db/migration.py +++ b/nova/virt/baremetal/db/migration.py @@ -22,7 +22,8 @@ from nova import utils IMPL = utils.LazyPluggable( - 'baremetal_db_backend', + 'db_backend', + config_group='baremetal', sqlalchemy='nova.virt.baremetal.db.sqlalchemy.migration') INIT_VERSION = 0 diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py index 97c158727..393b3657b 100644 --- a/nova/virt/baremetal/ipmi.py +++ b/nova/virt/baremetal/ipmi.py @@ -126,7 +126,7 @@ class IPMI(base.PowerManager): args.append(pwfile) args.extend(command.split(" ")) out, err = utils.execute(*args, attempts=3) - LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)%s'"), + LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"), locals()) return out, err finally: diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py index 26fb86f1e..d080f6d36 100644 --- a/nova/virt/disk/api.py +++ b/nova/virt/disk/api.py @@ -96,9 +96,13 @@ def mkfs(os_type, fs_label, target): utils.execute(*mkfs_command.split()) -def resize2fs(image, check_exit_code=False): - utils.execute('e2fsck', '-fp', image, check_exit_code=check_exit_code) - utils.execute('resize2fs', image, check_exit_code=check_exit_code) +def resize2fs(image, check_exit_code=False, run_as_root=False): + utils.execute('e2fsck', '-fp', image, + check_exit_code=check_exit_code, + run_as_root=run_as_root) + utils.execute('resize2fs', image, + check_exit_code=check_exit_code, + run_as_root=run_as_root) def get_disk_size(path): diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 566e5230d..747b60714 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -49,6 +49,17 @@ CONF.register_opts(driver_opts) LOG = logging.getLogger(__name__) +def driver_dict_from_config(named_driver_config, *args, **kwargs): + driver_registry = dict() + + for driver_str in named_driver_config: + driver_type, _sep, driver = driver_str.partition('=') + driver_class = importutils.import_class(driver) + driver_registry[driver_type] = driver_class(*args, **kwargs) + + return driver_registry + + def block_device_info_get_root(block_device_info): block_device_info = block_device_info or {} return block_device_info.get('root_device_name') diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 1692dc086..7439ad40a 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -284,11 +284,10 @@ class LibvirtDriver(driver.ComputeDriver): self.virtapi, get_connection=self._get_connection) self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver) - self.volume_drivers = {} - for driver_str in CONF.libvirt_volume_drivers: - driver_type, _sep, driver = driver_str.partition('=') - driver_class = importutils.import_class(driver) - self.volume_drivers[driver_type] = driver_class(self) + + self.volume_drivers = driver.driver_dict_from_config( + 
CONF.libvirt_volume_drivers, self) + self._host_state = None disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"} @@ -2832,7 +2831,15 @@ class LibvirtDriver(driver.ComputeDriver): disk_info = [] virt_dom = self._lookup_by_name(instance_name) - xml = virt_dom.XMLDesc(0) + try: + xml = virt_dom.XMLDesc(0) + except libvirt.libvirtError as ex: + error_code = ex.get_error_code() + msg = _("Error from libvirt while getting description of " + "%(instance_name)s: [Error Code %(error_code)s] " + "%(ex)s") % locals() + LOG.warn(msg) + raise exception.InstanceNotFound(instance_id=instance_name) doc = etree.fromstring(xml) disk_nodes = doc.findall('.//devices/disk') path_nodes = doc.findall('.//devices/disk/source') diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index d272e408c..0815c142f 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -228,7 +228,7 @@ class Lvm(Image): cmd = ('dd', 'if=%s' % base, 'of=%s' % self.path, 'bs=4M') utils.execute(*cmd, run_as_root=True) if resize: - disk.resize2fs(self.path) + disk.resize2fs(self.path, run_as_root=True) generated = 'ephemeral_size' in kwargs diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 83d43a6db..d90a5e295 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -150,6 +150,9 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver): def get_bridge_name(self, network): return network.get('bridge') or CONF.libvirt_ovs_bridge + def get_ovs_interfaceid(self, mapping): + return mapping.get('ovs_interfaceid') or mapping['vif_uuid'] + def get_config(self, instance, network, mapping): dev = self.get_vif_devname(mapping) @@ -162,55 +165,26 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver): return conf - def create_ovs_vif_port(self, bridge, dev, iface_id, mac, instance_id): - utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port', - bridge, dev, - '--', 'set', 'Interface', dev, - 'external-ids:iface-id=%s' % iface_id, - 'external-ids:iface-status=active', - 'external-ids:attached-mac=%s' % mac, - 'external-ids:vm-uuid=%s' % instance_id, - run_as_root=True) - - def delete_ovs_vif_port(self, bridge, dev): - utils.execute('ovs-vsctl', 'del-port', bridge, dev, - run_as_root=True) - utils.execute('ip', 'link', 'delete', dev, run_as_root=True) - def plug(self, instance, vif): network, mapping = vif - iface_id = mapping['vif_uuid'] + iface_id = self.get_ovs_interfaceid(mapping) dev = self.get_vif_devname(mapping) - if not linux_net.device_exists(dev): - # Older version of the command 'ip' from the iproute2 package - # don't have support for the tuntap option (lp:882568). If it - # turns out we're on an old version we work around this by using - # tunctl. 
- try: - # First, try with 'ip' - utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap', - run_as_root=True) - except exception.ProcessExecutionError: - # Second option: tunctl - utils.execute('tunctl', '-b', '-t', dev, run_as_root=True) - utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True) - - self.create_ovs_vif_port(self.get_bridge_name(network), - dev, iface_id, mapping['mac'], - instance['uuid']) + linux_net.create_tap_dev(dev) + linux_net.create_ovs_vif_port(self.get_bridge_name(network), + dev, iface_id, mapping['mac'], + instance['uuid']) def unplug(self, instance, vif): """Unplug the VIF by deleting the port from the bridge.""" try: network, mapping = vif - self.delete_ovs_vif_port(self.get_bridge_name(network), - self.get_vif_devname(mapping)) + linux_net.delete_ovs_vif_port(self.get_bridge_name(network), + self.get_vif_devname(mapping)) except exception.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) -class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver, - LibvirtOpenVswitchDriver): +class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver): """VIF driver that uses OVS + Linux Bridge for iptables compatibility. Enables the use of OVS-based Quantum plugins while at the same @@ -229,6 +203,9 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver, def get_bridge_name(self, network): return network.get('bridge') or CONF.libvirt_ovs_bridge + def get_ovs_interfaceid(self, mapping): + return mapping.get('ovs_interfaceid') or mapping['vif_uuid'] + def get_config(self, instance, network, mapping): br_name = self.get_br_name(mapping['vif_uuid']) network['bridge'] = br_name @@ -247,9 +224,9 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver, """ network, mapping = vif - iface_id = mapping['vif_uuid'] - br_name = self.get_br_name(iface_id) - v1_name, v2_name = self.get_veth_pair_names(iface_id) + iface_id = self.get_ovs_interfaceid(mapping) + br_name = self.get_br_name(mapping['vif_uuid']) + v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid']) if not linux_net.device_exists(br_name): utils.execute('brctl', 'addbr', br_name, run_as_root=True) @@ -258,9 +235,9 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver, linux_net._create_veth_pair(v1_name, v2_name) utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True) utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True) - self.create_ovs_vif_port(self.get_bridge_name(network), - v2_name, iface_id, mapping['mac'], - instance['uuid']) + linux_net.create_ovs_vif_port(self.get_bridge_name(network), + v2_name, iface_id, mapping['mac'], + instance['uuid']) def unplug(self, instance, vif): """UnPlug using hybrid strategy @@ -270,16 +247,16 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver, """ try: network, mapping = vif - iface_id = mapping['vif_uuid'] - br_name = self.get_br_name(iface_id) - v1_name, v2_name = self.get_veth_pair_names(iface_id) + br_name = self.get_br_name(mapping['vif_uuid']) + v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid']) utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True) utils.execute('ip', 'link', 'set', br_name, 'down', run_as_root=True) utils.execute('brctl', 'delbr', br_name, run_as_root=True) - self.delete_ovs_vif_port(self.get_bridge_name(network), v2_name) + linux_net.delete_ovs_vif_port(self.get_bridge_name(network), + v2_name) except exception.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) @@ -291,6 +268,9 @@ class 
LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver): def get_bridge_name(self, network): return network.get('bridge') or CONF.libvirt_ovs_bridge + def get_ovs_interfaceid(self, mapping): + return mapping.get('ovs_interfaceid') or mapping['vif_uuid'] + def get_config(self, instance, network, mapping): """Pass data required to create OVS virtual port element.""" conf = super(LibvirtOpenVswitchVirtualPortDriver, @@ -299,7 +279,8 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver): mapping) designer.set_vif_host_backend_ovs_config( - conf, self.get_bridge_name(network), mapping['vif_uuid'], + conf, self.get_bridge_name(network), + self.get_ovs_interfaceid(mapping), self.get_vif_devname(mapping)) return conf diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5f79b6c3a..c2d717cfd 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -125,6 +125,7 @@ class VolumeOps(object): try: vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref, device_number) + sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref) except volume_utils.StorageError, exc: LOG.exception(exc) raise Exception(_('Unable to locate volume %s') % mountpoint) @@ -143,7 +144,6 @@ class VolumeOps(object): # Forget SR only if no other volumes on this host are using it try: - sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref) volume_utils.purge_sr(self._session, sr_ref) except volume_utils.StorageError, exc: LOG.exception(exc) diff --git a/nova/wsgi.py b/nova/wsgi.py index 16851dba8..0a7570b6c 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -28,6 +28,7 @@ import eventlet.wsgi import greenlet from paste import deploy import routes.middleware +import ssl import webob.dec import webob.exc @@ -45,7 +46,21 @@ wsgi_opts = [ help='A python format string that is used as the template to ' 'generate log lines. The following values can be formatted ' 'into it: client_ip, date_time, request_line, status_code, ' - 'body_length, wall_seconds.') + 'body_length, wall_seconds.'), + cfg.StrOpt('ssl_ca_file', + default=None, + help="CA certificate file to use to verify " + "connecting clients"), + cfg.StrOpt('ssl_cert_file', + default=None, + help="SSL certificate of API server"), + cfg.StrOpt('ssl_key_file', + default=None, + help="SSL private key of API server"), + cfg.IntOpt('tcp_keepidle', + default=600, + help="Sets the value of TCP_KEEPIDLE in seconds for each " + "server socket. Not supported on OS X.") ] CONF = cfg.CONF CONF.register_opts(wsgi_opts) @@ -59,7 +74,8 @@ class Server(object): default_pool_size = 1000 def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None, - protocol=eventlet.wsgi.HttpProtocol, backlog=128): + protocol=eventlet.wsgi.HttpProtocol, backlog=128, + use_ssl=False): """Initialize, but do not start, a WSGI server. :param name: Pretty name for logging. 
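The nova/wsgi.py changes above add per-server SSL: new ssl_cert_file, ssl_key_file, ssl_ca_file and tcp_keepidle options plus a use_ssl flag on Server, and the hunk that follows wraps the listening socket with eventlet.wrap_ssl when the flag is set. A short sketch of exercising that path, patterned on the TestWSGIServerWithSSL cases earlier in this change; it assumes it is run from a nova checkout so the test-fixture paths under nova/tests/ssl_cert resolve, and set_override stands in for setting the options in nova.conf:

import os
import eventlet
import nova.wsgi

# Point the new options at the certificates added by this change
# (in a deployment these would live in nova.conf instead).
CONF = nova.wsgi.CONF
CERT_DIR = 'nova/tests/ssl_cert'
CONF.set_override('ssl_cert_file', os.path.join(CERT_DIR, 'certificate.crt'))
CONF.set_override('ssl_key_file', os.path.join(CERT_DIR, 'privatekey.key'))

def pong_app(env, start_response):
    start_response('200 OK', {})
    return ['PONG']

server = nova.wsgi.Server('fake_ssl', pong_app,
                          host='127.0.0.1', port=0, use_ssl=True)
server.start()

# Talk to the wrapped socket over TLS the same way the unit tests do.
sock = eventlet.wrap_ssl(eventlet.connect(('127.0.0.1', server.port)),
                         ca_certs=os.path.join(CERT_DIR, 'ca.crt'))
sock.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
           'Connection: close\r\nContent-length:4\r\n\r\nPING')
print(sock.read(8192)[-4:])  # PONG

server.stop()
server.wait()

The tests drive this with enabled_ssl_apis=['fake_ssl'] in setUp, which is how individual API servers opt in to the SSL-wrapped socket while others keep serving plain HTTP.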
@@ -78,6 +94,7 @@ class Server(object): self._pool = eventlet.GreenPool(pool_size or self.default_pool_size) self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name) self._wsgi_logger = logging.WritableLogger(self._logger) + self._use_ssl = use_ssl if backlog < 1: raise exception.InvalidInput( @@ -106,6 +123,60 @@ class Server(object): :returns: None """ + if self._use_ssl: + try: + ca_file = CONF.ssl_ca_file + cert_file = CONF.ssl_cert_file + key_file = CONF.ssl_key_file + + if cert_file and not os.path.exists(cert_file): + raise RuntimeError( + _("Unable to find cert_file : %s") % cert_file) + + if ca_file and not os.path.exists(ca_file): + raise RuntimeError( + _("Unable to find ca_file : %s") % ca_file) + + if key_file and not os.path.exists(key_file): + raise RuntimeError( + _("Unable to find key_file : %s") % key_file) + + if self._use_ssl and (not cert_file or not key_file): + raise RuntimeError( + _("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + ssl_kwargs = { + 'server_side': True, + 'certfile': cert_file, + 'keyfile': key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ssl_ca_file: + ssl_kwargs['ca_certs'] = ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + self._socket = eventlet.wrap_ssl(self._socket, + **ssl_kwargs) + + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_KEEPALIVE, 1) + + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + self._socket.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + except Exception: + LOG.error(_("Failed to start %(name)s on %(host)s" + ":%(port)s with SSL support") % self.__dict__) + raise + self._server = eventlet.spawn(eventlet.wsgi.server, self._socket, self.app, diff --git a/run_tests.sh b/run_tests.sh index 8a7bce63b..11bc8b518 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -83,18 +83,29 @@ function run_tests { if [ "x$testrargs" = "x" ]; then testrargs="^(?!.*test.*coverage).*$" fi - export PYTHON="${wrapper} coverage run --source nova --parallel-mode" + TESTRTESTS="$TESTRTESTS --coverage" + else + TESTRTESTS="$TESTRTESTS --slowest" fi + # Just run the test suites in current environment set +e - TESTRTESTS="$TESTRTESTS $testrargs" + testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` + TESTRTESTS="$TESTRTESTS --testr-args='$testrargs'" echo "Running \`${wrapper} $TESTRTESTS\`" - ${wrapper} $TESTRTESTS + bash -c "${wrapper} $TESTRTESTS" RESULT=$? set -e copy_subunit_log + if [ $coverage -eq 1 ]; then + echo "Generating coverage report in covhtml/" + # Don't compute coverage for common code, which is tested elsewhere + ${wrapper} coverage combine + ${wrapper} coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i + fi + return $RESULT } @@ -114,7 +125,7 @@ function run_pep8 { # NOTE(lzyeval): Avoid selecting *.pyc files to reduce pep8 check-up time # when running on devstack. srcfiles=`find nova -type f -name "*.py" ! -wholename "nova\/openstack*"` - srcfiles+=" `find bin -type f ! -name "nova.conf*" ! -name "*api-paste.ini*"`" + srcfiles+=" `find bin -type f ! -name "nova.conf*" ! -name "*api-paste.ini*" ! 
-name "*~"`" srcfiles+=" `find tools -type f -name "*.py"`" srcfiles+=" `find plugins -type f -name "*.py"`" srcfiles+=" `find smoketests -type f -name "*.py"`" @@ -143,7 +154,7 @@ function run_pep8 { } -TESTRTESTS="testr run --parallel $testropts" +TESTRTESTS="python setup.py testr $testropts" if [ $never_venv -eq 0 ] then @@ -197,10 +208,3 @@ if [ -z "$testrargs" ]; then run_pep8 fi fi - -if [ $coverage -eq 1 ]; then - echo "Generating coverage report in covhtml/" - # Don't compute coverage for common code, which is tested elsewhere - ${wrapper} coverage combine - ${wrapper} coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i -fi diff --git a/tools/conf/extract_opts.py b/tools/conf/extract_opts.py index ccfcf3e4f..4dde53335 100644 --- a/tools/conf/extract_opts.py +++ b/tools/conf/extract_opts.py @@ -74,10 +74,6 @@ def main(srcfiles): # The options list is a list of (module, options) tuples opts_by_group = {'DEFAULT': []} - opts_by_group['DEFAULT'].append( - (cfg.__name__ + ':' + cfg.CommonConfigOpts.__name__, - _list_opts(cfg.CommonConfigOpts)[0][1])) - for pkg_name in pkg_names: mods = mods_by_pkg.get(pkg_name) mods.sort() diff --git a/tools/lintstack.sh b/tools/lintstack.sh index 42c6a60b3..d8591d03d 100755 --- a/tools/lintstack.sh +++ b/tools/lintstack.sh @@ -20,7 +20,16 @@ # commit for review. set -e TOOLS_DIR=$(cd $(dirname "$0") && pwd) -GITHEAD=`git rev-parse HEAD` +# Get the current branch name. +GITHEAD=`git rev-parse --abbrev-ref HEAD` +if [[ "$GITHEAD" == "HEAD" ]]; then + # In detached head mode, get revision number instead + GITHEAD=`git rev-parse HEAD` + echo "Currently we are at commit $GITHEAD" +else + echo "Currently we are at branch $GITHEAD" +fi + cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py if git rev-parse HEAD^2 2>/dev/null; then @@ -47,8 +56,4 @@ git checkout $GITHEAD $TOOLS_DIR/lintstack.head.py echo "Check passed. FYI: the pylint exceptions are:" cat $TOOLS_DIR/pylint_exceptions -echo -echo "You are in detached HEAD mode. If you are a developer" -echo "and not very familiar with git, you might want to do" -echo "'git checkout branch-name' to go back to your branch." diff --git a/tools/xenserver/cleanup_sm_locks.py b/tools/xenserver/cleanup_sm_locks.py new file mode 100755 index 000000000..de455b076 --- /dev/null +++ b/tools/xenserver/cleanup_sm_locks.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python + +# Copyright 2013 OpenStack, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Script to cleanup old XenServer /var/lock/sm locks. + +XenServer 5.6 and 6.0 do not appear to always cleanup locks when using a +FileSR. ext3 has a limit of 32K inode links, so when we have 32K-2 (31998) +locks laying around, builds will begin to fail because we can't create any +additional locks. This cleanup script is something we can run periodically as +a stop-gap measure until this is fixed upstream. + +This script should be run on the dom0 of the affected machine. 
+""" +import errno +import optparse +import os +import sys +import time + +BASE = '/var/lock/sm' + + +def _get_age_days(secs): + return float(time.time() - secs) / 86400 + + +def _parse_args(): + parser = optparse.OptionParser() + parser.add_option("-d", "--dry-run", + action="store_true", dest="dry_run", default=False, + help="don't actually remove locks") + parser.add_option("-l", "--limit", + action="store", type='int', dest="limit", + default=sys.maxint, + help="max number of locks to delete (default: no limit)") + parser.add_option("-v", "--verbose", + action="store_true", dest="verbose", default=False, + help="don't print status messages to stdout") + + options, args = parser.parse_args() + + try: + days_old = int(args[0]) + except (IndexError, ValueError): + parser.print_help() + sys.exit(1) + + return options, days_old + + +def main(): + options, days_old = _parse_args() + + if not os.path.exists(BASE): + print >> sys.stderr, "error: '%s' doesn't exist. Make sure you're"\ + " running this on the dom0." % BASE + sys.exit(1) + + lockpaths_removed = 0 + nspaths_removed = 0 + + for nsname in os.listdir(BASE)[:options.limit]: + nspath = os.path.join(BASE, nsname) + + if not os.path.isdir(nspath): + continue + + # Remove old lockfiles + removed = 0 + locknames = os.listdir(nspath) + for lockname in locknames: + lockpath = os.path.join(nspath, lockname) + lock_age_days = _get_age_days(os.path.getmtime(lockpath)) + if lock_age_days > days_old: + lockpaths_removed += 1 + removed += 1 + + if options.verbose: + print 'Removing old lock: %03d %s' % (lock_age_days, + lockpath) + + if not options.dry_run: + os.unlink(lockpath) + + # Remove empty namespace paths + if len(locknames) == removed: + nspaths_removed += 1 + + if options.verbose: + print 'Removing empty namespace: %s' % nspath + + if not options.dry_run: + try: + os.rmdir(nspath) + except OSError, e: + if e.errno == errno.ENOTEMPTY: + print >> sys.stderr, "warning: directory '%s'"\ + " not empty" % nspath + else: + raise + + if options.dry_run: + print "** Dry Run **" + + print "Total locks removed: ", lockpaths_removed + print "Total namespaces removed: ", nspaths_removed + + +if __name__ == '__main__': + main() diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py index eeaf978b8..27b89d510 100755 --- a/tools/xenserver/vm_vdi_cleaner.py +++ b/tools/xenserver/vm_vdi_cleaner.py @@ -42,6 +42,7 @@ cleaner_opts = [ ] CONF = cfg.CONF CONF.register_opts(cleaner_opts) +CONF.import_opt('verbose', 'nova.openstack.common.log') CONF.import_opt("resize_confirm_window", "nova.compute.manager") |