summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--HACKING.rst1
-rwxr-xr-xbin/nova-all2
-rwxr-xr-xbin/nova-api2
-rwxr-xr-xbin/nova-baremetal-deploy-helper32
-rwxr-xr-xbin/nova-baremetal-manage2
-rwxr-xr-xbin/nova-cells3
-rwxr-xr-xbin/nova-cert3
-rwxr-xr-xbin/nova-clear-rabbit-queues3
-rwxr-xr-xbin/nova-compute3
-rwxr-xr-xbin/nova-conductor3
-rwxr-xr-xbin/nova-console3
-rwxr-xr-xbin/nova-consoleauth3
-rwxr-xr-xbin/nova-dhcpbridge3
-rwxr-xr-xbin/nova-manage28
-rwxr-xr-xbin/nova-network3
-rwxr-xr-xbin/nova-novncproxy3
-rwxr-xr-xbin/nova-rpc-zmq-receiver32
-rwxr-xr-xbin/nova-scheduler3
-rwxr-xr-xbin/nova-spicehtml5proxy3
-rw-r--r--doc/README.rst12
-rw-r--r--doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json34
-rw-r--r--doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml12
-rw-r--r--doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json219
-rw-r--r--doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml71
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json30
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml15
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json5
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml4
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json12
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml13
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json16
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml15
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-list.xml17
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json14
-rw-r--r--doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml13
-rw-r--r--doc/api_samples/os-attach-interfaces/server-post-req.json16
-rw-r--r--doc/api_samples/os-attach-interfaces/server-post-req.xml19
-rw-r--r--doc/api_samples/os-attach-interfaces/server-post-resp.json16
-rw-r--r--doc/api_samples/os-attach-interfaces/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-detail-resp.json26
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml6
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-list-resp.json8
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-list-resp.xml4
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-search-resp.json8
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-search-resp.xml4
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-servers-resp.json8
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-servers-resp.xml6
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-show-resp.json24
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-show-resp.xml4
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json16
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-statistics-resp.xml2
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json7
-rw-r--r--doc/api_samples/os-hypervisors/hypervisors-uptime-resp.xml2
-rw-r--r--doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json8
-rw-r--r--doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml7
-rw-r--r--doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json11
-rw-r--r--doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml9
-rw-r--r--doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json13
-rw-r--r--doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml11
-rw-r--r--doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json11
-rw-r--r--doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml9
-rw-r--r--doc/source/devref/cloudpipe.rst166
-rw-r--r--doc/source/devref/development.environment.rst37
-rw-r--r--doc/source/devref/down.sh7
-rw-r--r--doc/source/devref/filter_scheduler.rst2
-rw-r--r--doc/source/devref/index.rst2
-rw-r--r--doc/source/devref/interfaces17
-rw-r--r--doc/source/devref/multinic.rst39
-rw-r--r--doc/source/devref/network.rst79
-rw-r--r--doc/source/devref/server.conf.template34
-rw-r--r--doc/source/devref/up.sh7
-rw-r--r--doc/source/image_src/multinic_1.odgbin12363 -> 0 bytes
-rw-r--r--doc/source/image_src/multinic_2.odgbin13425 -> 0 bytes
-rw-r--r--doc/source/image_src/multinic_3.odgbin13598 -> 0 bytes
-rw-r--r--doc/source/images/multinic_dhcp.pngbin54531 -> 0 bytes
-rw-r--r--doc/source/images/multinic_flat.pngbin40871 -> 0 bytes
-rw-r--r--doc/source/images/multinic_vlan.pngbin58552 -> 0 bytes
-rw-r--r--etc/nova/nova.conf.sample3
-rw-r--r--etc/nova/policy.json3
-rw-r--r--etc/nova/rootwrap.d/compute.filters10
-rw-r--r--nova/api/auth.py2
-rw-r--r--nova/api/ec2/__init__.py2
-rw-r--r--nova/api/ec2/cloud.py21
-rw-r--r--nova/api/ec2/faults.py2
-rw-r--r--nova/api/metadata/base.py3
-rw-r--r--nova/api/metadata/handler.py2
-rw-r--r--nova/api/openstack/auth.py2
-rw-r--r--nova/api/openstack/common.py12
-rw-r--r--nova/api/openstack/compute/__init__.py4
-rw-r--r--nova/api/openstack/compute/contrib/__init__.py3
-rw-r--r--nova/api/openstack/compute/contrib/admin_actions.py8
-rw-r--r--nova/api/openstack/compute/contrib/attach_interfaces.py192
-rw-r--r--nova/api/openstack/compute/contrib/availability_zone.py4
-rw-r--r--nova/api/openstack/compute/contrib/cells.py8
-rw-r--r--nova/api/openstack/compute/contrib/cloudpipe.py3
-rw-r--r--nova/api/openstack/compute/contrib/floating_ips_bulk.py2
-rw-r--r--nova/api/openstack/compute/contrib/fping.py3
-rw-r--r--nova/api/openstack/compute/contrib/hide_server_addresses.py2
-rw-r--r--nova/api/openstack/compute/contrib/hosts.py4
-rw-r--r--nova/api/openstack/compute/contrib/image_size.py88
-rw-r--r--nova/api/openstack/compute/contrib/instance_actions.py6
-rw-r--r--nova/api/openstack/compute/contrib/instance_usage_audit_log.py2
-rw-r--r--nova/api/openstack/compute/contrib/os_networks.py2
-rw-r--r--nova/api/openstack/compute/contrib/os_tenant_networks.py10
-rw-r--r--nova/api/openstack/compute/contrib/quota_classes.py3
-rw-r--r--nova/api/openstack/compute/contrib/quotas.py4
-rw-r--r--nova/api/openstack/compute/contrib/rescue.py2
-rw-r--r--nova/api/openstack/compute/contrib/security_group_default_rules.py216
-rw-r--r--nova/api/openstack/compute/contrib/security_groups.py135
-rw-r--r--nova/api/openstack/compute/contrib/services.py4
-rw-r--r--nova/api/openstack/compute/contrib/volumes.py3
-rw-r--r--nova/api/openstack/compute/extensions.py3
-rw-r--r--nova/api/openstack/compute/images.py2
-rw-r--r--nova/api/openstack/compute/servers.py9
-rw-r--r--nova/api/openstack/wsgi.py13
-rw-r--r--nova/api/sizelimit.py2
-rw-r--r--nova/api/validator.py4
-rw-r--r--nova/availability_zones.py3
-rw-r--r--nova/cells/manager.py3
-rw-r--r--nova/cells/messaging.py2
-rw-r--r--nova/cells/opts.py2
-rw-r--r--nova/cells/rpc_driver.py3
-rw-r--r--nova/cells/rpcapi.py3
-rw-r--r--nova/cells/scheduler.py10
-rw-r--r--nova/cells/state.py3
-rw-r--r--nova/cert/rpcapi.py3
-rw-r--r--nova/cloudpipe/pipelib.py3
-rw-r--r--nova/common/memorycache.py3
-rw-r--r--nova/compute/__init__.py17
-rw-r--r--nova/compute/api.py412
-rw-r--r--nova/compute/instance_types.py3
-rwxr-xr-xnova/compute/manager.py105
-rw-r--r--nova/compute/resource_tracker.py3
-rw-r--r--nova/compute/rpcapi.py20
-rw-r--r--nova/compute/utils.py3
-rw-r--r--nova/conductor/__init__.py6
-rw-r--r--nova/conductor/api.py3
-rw-r--r--nova/conductor/manager.py8
-rw-r--r--nova/conductor/rpcapi.py3
-rw-r--r--nova/config.py3
-rw-r--r--nova/console/api.py3
-rw-r--r--nova/console/manager.py3
-rw-r--r--nova/console/rpcapi.py3
-rw-r--r--nova/console/vmrc.py3
-rw-r--r--nova/console/vmrc_manager.py3
-rw-r--r--nova/console/xvp.py2
-rw-r--r--nova/consoleauth/__init__.py2
-rw-r--r--nova/consoleauth/manager.py3
-rw-r--r--nova/consoleauth/rpcapi.py3
-rw-r--r--nova/context.py53
-rw-r--r--nova/crypto.py2
-rw-r--r--nova/db/api.py56
-rw-r--r--nova/db/base.py3
-rw-r--r--nova/db/sqlalchemy/api.py348
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/149_inet_datatype_for_postgres.py2
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/155_add_task_log_uc.py40
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/156_cidr_column_length.py56
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/157_add_security_group_default_rules.py61
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/158_add_networks_uc.py40
-rw-r--r--nova/db/sqlalchemy/models.py20
-rw-r--r--nova/db/sqlalchemy/types.py12
-rw-r--r--nova/db/sqlalchemy/utils.py59
-rw-r--r--nova/exception.py44
-rw-r--r--nova/image/glance.py2
-rw-r--r--nova/image/s3.py2
-rw-r--r--nova/ipv6/api.py4
-rw-r--r--nova/locale/bs/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/cs/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/da/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/de/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/en_AU/LC_MESSAGES/nova.po4
-rw-r--r--nova/locale/en_GB/LC_MESSAGES/nova.po4
-rw-r--r--nova/locale/en_US/LC_MESSAGES/nova.po4
-rw-r--r--nova/locale/es/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/fr/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/it/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/ja/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/ko/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/nb/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/nova.pot1130
-rw-r--r--nova/locale/pt_BR/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/ru/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/tl/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/tr/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/tr_TR/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/uk/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/zh_CN/LC_MESSAGES/nova.po2
-rw-r--r--nova/locale/zh_TW/LC_MESSAGES/nova.po2
-rw-r--r--nova/manager.py24
-rw-r--r--nova/netconf.py2
-rw-r--r--nova/network/__init__.py15
-rw-r--r--nova/network/api.py25
-rw-r--r--nova/network/driver.py3
-rw-r--r--nova/network/floating_ips.py3
-rw-r--r--nova/network/ldapdns.py3
-rw-r--r--nova/network/linux_net.py19
-rw-r--r--nova/network/manager.py50
-rw-r--r--nova/network/minidns.py4
-rw-r--r--nova/network/quantumv2/__init__.py7
-rw-r--r--nova/network/quantumv2/api.py127
-rw-r--r--nova/network/rpcapi.py3
-rw-r--r--nova/network/security_group/__init__.py18
-rw-r--r--nova/network/security_group/openstack_driver.py59
-rw-r--r--nova/network/security_group/quantum_driver.py398
-rw-r--r--nova/network/security_group/security_group_base.py196
-rw-r--r--nova/notifications.py3
-rw-r--r--nova/objectstore/s3server.py2
-rw-r--r--nova/openstack/common/cfg.py1736
-rw-r--r--nova/openstack/common/db/api.py100
-rw-r--r--nova/openstack/common/db/sqlalchemy/session.py37
-rw-r--r--nova/openstack/common/eventlet_backdoor.py3
-rw-r--r--nova/openstack/common/iniparser.py130
-rw-r--r--nova/openstack/common/lockutils.py3
-rw-r--r--nova/openstack/common/log.py3
-rw-r--r--nova/openstack/common/notifier/api.py3
-rw-r--r--nova/openstack/common/notifier/log_notifier.py3
-rw-r--r--nova/openstack/common/notifier/rpc_notifier.py2
-rw-r--r--nova/openstack/common/notifier/rpc_notifier2.py3
-rw-r--r--nova/openstack/common/plugin/pluginmanager.py3
-rw-r--r--nova/openstack/common/rpc/__init__.py6
-rw-r--r--nova/openstack/common/rpc/amqp.py222
-rw-r--r--nova/openstack/common/rpc/common.py33
-rw-r--r--nova/openstack/common/rpc/impl_kombu.py29
-rw-r--r--nova/openstack/common/rpc/impl_qpid.py30
-rw-r--r--nova/openstack/common/rpc/impl_zmq.py2
-rw-r--r--nova/openstack/common/rpc/matchmaker.py3
-rw-r--r--nova/paths.py2
-rw-r--r--nova/policy.py3
-rw-r--r--nova/quota.py4
-rw-r--r--nova/scheduler/chance.py9
-rw-r--r--nova/scheduler/driver.py72
-rw-r--r--nova/scheduler/filter_scheduler.py12
-rw-r--r--nova/scheduler/filters/aggregate_multitenancy_isolation.py47
-rw-r--r--nova/scheduler/filters/availability_zone_filter.py3
-rw-r--r--nova/scheduler/filters/compute_filter.py3
-rw-r--r--nova/scheduler/filters/core_filter.py4
-rw-r--r--nova/scheduler/filters/disk_filter.py3
-rw-r--r--nova/scheduler/filters/io_ops_filter.py3
-rw-r--r--nova/scheduler/filters/isolated_hosts_filter.py3
-rw-r--r--nova/scheduler/filters/num_instances_filter.py3
-rw-r--r--nova/scheduler/filters/ram_filter.py3
-rw-r--r--nova/scheduler/filters/trusted_filter.py4
-rw-r--r--nova/scheduler/host_manager.py3
-rw-r--r--nova/scheduler/manager.py13
-rw-r--r--nova/scheduler/multi.py3
-rw-r--r--nova/scheduler/rpcapi.py10
-rw-r--r--nova/scheduler/scheduler_options.py3
-rw-r--r--nova/scheduler/weights/__init__.py2
-rw-r--r--nova/scheduler/weights/least_cost.py3
-rw-r--r--nova/scheduler/weights/ram.py4
-rw-r--r--nova/service.py2
-rw-r--r--nova/servicegroup/api.py17
-rw-r--r--nova/servicegroup/drivers/db.py3
-rw-r--r--nova/servicegroup/drivers/mc.py109
-rw-r--r--nova/servicegroup/drivers/zk.py4
-rw-r--r--nova/spice/__init__.py2
-rw-r--r--nova/storage/__init__.py15
-rw-r--r--nova/storage/linuxscsi.py139
-rw-r--r--nova/test.py2
-rw-r--r--nova/tests/api/ec2/test_cinder_cloud.py2
-rw-r--r--nova/tests/api/ec2/test_cloud.py4
-rw-r--r--nova/tests/api/ec2/test_ec2_validate.py3
-rw-r--r--nova/tests/api/ec2/test_middleware.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py245
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cloudpipe.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_evacuate.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_fping.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_image_size.py130
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_instance_actions.py19
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_networks.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py639
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_rescue.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py467
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_security_groups.py193
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_password.py2
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_volumes.py6
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py4
-rw-r--r--nova/tests/api/openstack/compute/test_image_metadata.py2
-rw-r--r--nova/tests/api/openstack/compute/test_server_actions.py3
-rw-r--r--nova/tests/api/openstack/compute/test_server_metadata.py2
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py11
-rw-r--r--nova/tests/api/openstack/fakes.py6
-rw-r--r--nova/tests/api/test_sizelimit.py2
-rw-r--r--nova/tests/baremetal/db/base.py3
-rw-r--r--nova/tests/baremetal/db/test_bm_node.py58
-rw-r--r--nova/tests/baremetal/db/utils.py1
-rw-r--r--nova/tests/baremetal/test_driver.py264
-rw-r--r--nova/tests/baremetal/test_ipmi.py3
-rw-r--r--nova/tests/baremetal/test_nova_baremetal_deploy_helper.py23
-rw-r--r--nova/tests/baremetal/test_pxe.py124
-rw-r--r--nova/tests/baremetal/test_virtual_power_driver.py6
-rw-r--r--nova/tests/baremetal/test_volume_driver.py4
-rw-r--r--nova/tests/cells/fakes.py3
-rw-r--r--nova/tests/cells/test_cells_manager.py3
-rw-r--r--nova/tests/cells/test_cells_messaging.py5
-rw-r--r--nova/tests/cells/test_cells_rpc_driver.py3
-rw-r--r--nova/tests/cells/test_cells_rpcapi.py3
-rw-r--r--nova/tests/cells/test_cells_scheduler.py6
-rw-r--r--nova/tests/cert/test_rpcapi.py3
-rw-r--r--nova/tests/compute/test_compute.py88
-rw-r--r--nova/tests/compute/test_compute_utils.py3
-rw-r--r--nova/tests/compute/test_multiple_nodes.py3
-rw-r--r--nova/tests/compute/test_resource_tracker.py3
-rw-r--r--nova/tests/compute/test_rpcapi.py3
-rw-r--r--nova/tests/conf_fixture.py2
-rw-r--r--nova/tests/console/test_console.py3
-rw-r--r--nova/tests/console/test_rpcapi.py3
-rw-r--r--nova/tests/consoleauth/test_rpcapi.py3
-rw-r--r--nova/tests/db/fakes.py10
-rw-r--r--nova/tests/fake_imagebackend.py2
-rw-r--r--nova/tests/fake_libvirt_utils.py63
-rw-r--r--nova/tests/fake_network.py3
-rw-r--r--nova/tests/fake_policy.py3
-rw-r--r--nova/tests/fake_volume.py3
-rw-r--r--nova/tests/fakelibvirt.py38
-rw-r--r--nova/tests/hyperv/db_fakes.py13
-rw-r--r--nova/tests/image/fake.py11
-rw-r--r--nova/tests/image/test_fake.py3
-rw-r--r--nova/tests/image/test_glance.py2
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl34
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl12
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl219
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl71
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl24
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl12
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl12
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl15
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl14
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl13
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl26
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl24
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl11
-rw-r--r--nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl13
-rw-r--r--nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl11
-rw-r--r--nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl11
-rw-r--r--nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl9
-rw-r--r--nova/tests/integrated/integrated_helpers.py5
-rw-r--r--nova/tests/integrated/test_api_samples.py299
-rw-r--r--nova/tests/integrated/test_extensions.py5
-rw-r--r--nova/tests/integrated/test_login.py2
-rw-r--r--nova/tests/integrated/test_multiprocess_api.py15
-rw-r--r--nova/tests/integrated/test_servers.py2
-rw-r--r--nova/tests/integrated/test_xml.py2
-rw-r--r--nova/tests/matchers.py6
-rw-r--r--nova/tests/network/test_linux_net.py2
-rw-r--r--nova/tests/network/test_manager.py26
-rw-r--r--nova/tests/network/test_network_info.py6
-rw-r--r--nova/tests/network/test_quantumv2.py92
-rw-r--r--nova/tests/network/test_rpcapi.py3
-rw-r--r--nova/tests/policy_fixture.py2
-rw-r--r--nova/tests/scheduler/test_chance_scheduler.py30
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py40
-rw-r--r--nova/tests/scheduler/test_host_filters.py44
-rw-r--r--nova/tests/scheduler/test_least_cost.py5
-rw-r--r--nova/tests/scheduler/test_rpcapi.py9
-rw-r--r--nova/tests/scheduler/test_scheduler.py123
-rw-r--r--nova/tests/servicegroup/test_mc_servicegroup.py220
-rw-r--r--nova/tests/servicegroup/test_zk_driver.py2
-rw-r--r--nova/tests/test_availability_zones.py3
-rw-r--r--nova/tests/test_baremetal_migrations.conf9
-rw-r--r--nova/tests/test_db_api.py19
-rw-r--r--nova/tests/test_hypervapi.py3
-rw-r--r--nova/tests/test_imagebackend.py77
-rw-r--r--nova/tests/test_imagecache.py28
-rw-r--r--nova/tests/test_libvirt.py314
-rw-r--r--nova/tests/test_libvirt_blockinfo.py15
-rw-r--r--nova/tests/test_libvirt_config.py56
-rw-r--r--nova/tests/test_libvirt_vif.py2
-rw-r--r--nova/tests/test_libvirt_volume.py170
-rw-r--r--nova/tests/test_metadata.py2
-rw-r--r--nova/tests/test_migration_utils.py103
-rw-r--r--nova/tests/test_migrations.py434
-rw-r--r--nova/tests/test_notifications.py3
-rw-r--r--nova/tests/test_objectstore.py2
-rw-r--r--nova/tests/test_pipelib.py3
-rw-r--r--nova/tests/test_quota.py3
-rw-r--r--nova/tests/test_service.py6
-rw-r--r--nova/tests/test_test.py3
-rw-r--r--nova/tests/test_utils.py58
-rw-r--r--nova/tests/test_versions.py3
-rw-r--r--nova/tests/test_virt_drivers.py72
-rw-r--r--nova/tests/test_xenapi.py3
-rw-r--r--nova/tests/utils.py9
-rw-r--r--nova/tests/vmwareapi/db_fakes.py13
-rw-r--r--nova/utils.py71
-rw-r--r--nova/version.py3
-rw-r--r--nova/virt/baremetal/baremetal_states.py5
-rw-r--r--nova/virt/baremetal/db/api.py36
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/api.py98
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py73
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py40
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/004_add_instance_name_to_bm_nodes.py37
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/models.py17
-rw-r--r--nova/virt/baremetal/db/sqlalchemy/session.py3
-rwxr-xr-xnova/virt/baremetal/driver.py250
-rw-r--r--nova/virt/baremetal/ipmi.py3
-rw-r--r--nova/virt/baremetal/pxe.py83
-rw-r--r--nova/virt/baremetal/vif_driver.py3
-rw-r--r--nova/virt/baremetal/virtual_power_driver.py2
-rw-r--r--nova/virt/baremetal/volume_driver.py3
-rw-r--r--nova/virt/configdrive.py3
-rw-r--r--nova/virt/disk/api.py3
-rw-r--r--nova/virt/disk/mount/api.py4
-rw-r--r--nova/virt/disk/mount/nbd.py3
-rw-r--r--nova/virt/disk/vfs/localfs.py2
-rwxr-xr-xnova/virt/driver.py50
-rw-r--r--nova/virt/event.py85
-rwxr-xr-xnova/virt/fake.py14
-rw-r--r--nova/virt/firewall.py9
-rw-r--r--nova/virt/hyperv/basevolumeutils.py3
-rw-r--r--nova/virt/hyperv/hostops.py3
-rw-r--r--nova/virt/hyperv/livemigrationops.py3
-rw-r--r--nova/virt/hyperv/pathutils.py3
-rw-r--r--nova/virt/hyperv/snapshotops.py3
-rw-r--r--nova/virt/hyperv/vif.py4
-rw-r--r--nova/virt/hyperv/vmops.py3
-rw-r--r--nova/virt/hyperv/vmutils.py3
-rw-r--r--nova/virt/hyperv/volumeops.py3
-rw-r--r--nova/virt/hyperv/volumeutils.py2
-rw-r--r--nova/virt/hyperv/volumeutilsv2.py3
-rwxr-xr-xnova/virt/images.py6
-rw-r--r--nova/virt/libvirt/blockinfo.py21
-rw-r--r--nova/virt/libvirt/config.py76
-rw-r--r--nova/virt/libvirt/designer.py38
-rwxr-xr-xnova/virt/libvirt/driver.py369
-rw-r--r--nova/virt/libvirt/firewall.py50
-rwxr-xr-xnova/virt/libvirt/imagebackend.py54
-rw-r--r--nova/virt/libvirt/imagecache.py11
-rwxr-xr-xnova/virt/libvirt/utils.py144
-rw-r--r--nova/virt/libvirt/vif.py22
-rw-r--r--nova/virt/libvirt/volume.py465
-rw-r--r--nova/virt/netutils.py2
-rw-r--r--nova/virt/powervm/blockdev.py78
-rwxr-xr-xnova/virt/powervm/driver.py4
-rw-r--r--nova/virt/powervm/operator.py125
-rwxr-xr-xnova/virt/vmwareapi/driver.py2
-rw-r--r--nova/virt/vmwareapi/network_util.py2
-rw-r--r--nova/virt/vmwareapi/vif.py3
-rw-r--r--nova/virt/vmwareapi/vim.py3
-rw-r--r--nova/virt/vmwareapi/vmops.py3
-rw-r--r--nova/virt/vmwareapi/volumeops.py4
-rw-r--r--nova/virt/xenapi/agent.py3
-rwxr-xr-xnova/virt/xenapi/driver.py2
-rw-r--r--nova/virt/xenapi/host.py3
-rw-r--r--nova/virt/xenapi/imageupload/glance.py3
-rw-r--r--nova/virt/xenapi/pool.py3
-rw-r--r--nova/virt/xenapi/vif.py3
-rw-r--r--nova/virt/xenapi/vm_utils.py2
-rw-r--r--nova/virt/xenapi/vmops.py2
-rw-r--r--nova/virt/xenapi/volume_utils.py4
-rw-r--r--nova/vnc/__init__.py2
-rw-r--r--nova/vnc/xvp_proxy.py2
-rw-r--r--nova/volume/__init__.py9
-rw-r--r--nova/volume/cinder.py2
-rw-r--r--nova/wsgi.py2
-rw-r--r--openstack-common.conf2
-rw-r--r--setup.py2
-rwxr-xr-xtools/conf/analyze_opts.py2
-rw-r--r--tools/conf/extract_opts.py3
-rwxr-xr-xtools/hacking.py45
-rw-r--r--tools/install_venv_common.py31
-rw-r--r--tools/pip-requires1
-rwxr-xr-xtools/run_pep8.sh6
-rw-r--r--tools/test-requires1
-rw-r--r--tools/xenserver/destroy_cached_images.py3
-rwxr-xr-xtools/xenserver/vm_vdi_cleaner.py19
-rw-r--r--tox.ini3
492 files changed, 11921 insertions, 4804 deletions
diff --git a/HACKING.rst b/HACKING.rst
index 30f87576f..223f3ed7b 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -63,7 +63,6 @@ Imports
- imports from ``migrate`` package
- imports from ``sqlalchemy`` package
- imports from ``nova.db.sqlalchemy.session`` module
-- imports from ``nova.openstack.common.log.logging`` package
- imports from ``nova.db.sqlalchemy.migration.versioning_api`` package
Example::
diff --git a/bin/nova-all b/bin/nova-all
index 2553f6487..5fb1748e8 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -33,6 +33,7 @@ eventlet.monkey_patch(os=False)
import os
import sys
+from oslo.config import cfg
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
sys.argv[0]), os.pardir, os.pardir))
@@ -42,7 +43,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
from nova.objectstore import s3server
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-api b/bin/nova-api
index d957f3e58..d21d955cf 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -29,6 +29,7 @@ eventlet.monkey_patch(os=False)
import os
import sys
+from oslo.config import cfg
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
sys.argv[0]), os.pardir, os.pardir))
@@ -37,7 +38,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-baremetal-deploy-helper b/bin/nova-baremetal-deploy-helper
index 894a42003..0d2d21984 100755
--- a/bin/nova-baremetal-deploy-helper
+++ b/bin/nova-baremetal-deploy-helper
@@ -47,6 +47,7 @@ from nova import config
from nova import context as nova_context
from nova.openstack.common import log as logging
from nova import utils
+from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
@@ -234,22 +235,27 @@ class Worker(threading.Thread):
while not self.stop:
try:
# Set timeout to check self.stop periodically
- (deployment_id, params) = QUEUE.get(block=True,
+ (node_id, params) = QUEUE.get(block=True,
timeout=self.queue_timeout)
except Queue.Empty:
pass
else:
# Requests comes here from BareMetalDeploy.post()
- LOG.info("start deployment: %s, %s", deployment_id, params)
+ LOG.info(_('start deployment for node %(node_id)s, '
+ 'params %(params)s') % locals())
+ context = nova_context.get_admin_context()
try:
+ db.bm_node_update(context, node_id,
+ {'task_state': baremetal_states.DEPLOYING})
deploy(**params)
except Exception:
- LOG.exception('deployment %s failed' % deployment_id)
+ LOG.error(_('deployment to node %s failed') % node_id)
+ db.bm_node_update(context, node_id,
+ {'task_state': baremetal_states.DEPLOYFAIL})
else:
- LOG.info("deployment %s done", deployment_id)
- finally:
- context = nova_context.get_admin_context()
- db.bm_deployment_destroy(context, deployment_id)
+ LOG.info(_('deployment to node %s done') % node_id)
+ db.bm_node_update(context, node_id,
+ {'task_state': baremetal_states.DEPLOYDONE})
class BareMetalDeploy(object):
@@ -276,8 +282,8 @@ class BareMetalDeploy(object):
x = inpt.read(length)
q = dict(cgi.parse_qsl(x))
try:
- deployment_id = q['i']
- deployment_key = q['k']
+ node_id = q['i']
+ deploy_key = q['k']
address = q['a']
port = q.get('p', '3260')
iqn = q['n']
@@ -287,9 +293,9 @@ class BareMetalDeploy(object):
return "parameter '%s' is not defined" % e
context = nova_context.get_admin_context()
- d = db.bm_deployment_get(context, deployment_id)
+ d = db.bm_node_get(context, node_id)
- if d['key'] != deployment_key:
+ if d['deploy_key'] != deploy_key:
start_response('400 Bad Request', [('Content-type', 'text/plain')])
return 'key is not match'
@@ -306,8 +312,8 @@ class BareMetalDeploy(object):
if not self.worker.isAlive():
self.worker = Worker()
self.worker.start()
- LOG.info("request is queued: %s, %s", deployment_id, params)
- QUEUE.put((deployment_id, params))
+ LOG.info("request is queued: node %s, params %s", node_id, params)
+ QUEUE.put((node_id, params))
# Requests go to Worker.run()
start_response('200 OK', [('Content-type', 'text/plain')])
return ''
diff --git a/bin/nova-baremetal-manage b/bin/nova-baremetal-manage
index 35fb83f72..42200e5e1 100755
--- a/bin/nova-baremetal-manage
+++ b/bin/nova-baremetal-manage
@@ -58,6 +58,7 @@ import gettext
import os
import sys
+from oslo.config import cfg
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -70,7 +71,6 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import cliutils
from nova.openstack.common import log as logging
from nova import version
diff --git a/bin/nova-cells b/bin/nova-cells
index a7e16ef53..bb955e9ec 100755
--- a/bin/nova-cells
+++ b/bin/nova-cells
@@ -24,6 +24,8 @@ eventlet.monkey_patch()
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -33,7 +35,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-cert b/bin/nova-cert
index 113b06510..d309cc028 100755
--- a/bin/nova-cert
+++ b/bin/nova-cert
@@ -23,6 +23,8 @@ eventlet.monkey_patch()
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -33,7 +35,6 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-clear-rabbit-queues b/bin/nova-clear-rabbit-queues
index e31758769..3e8b8e56d 100755
--- a/bin/nova-clear-rabbit-queues
+++ b/bin/nova-clear-rabbit-queues
@@ -27,6 +27,8 @@ import gettext
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -39,7 +41,6 @@ gettext.install('nova', unicode=1)
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
diff --git a/bin/nova-compute b/bin/nova-compute
index d99a21051..26d81d8cc 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -33,6 +33,8 @@ import os
import sys
import traceback
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -45,7 +47,6 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
import nova.db.api
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-conductor b/bin/nova-conductor
index 5d9e355dd..30d426f42 100755
--- a/bin/nova-conductor
+++ b/bin/nova-conductor
@@ -23,6 +23,8 @@ eventlet.monkey_patch()
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -33,7 +35,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-console b/bin/nova-console
index 011855a19..c567f28b1 100755
--- a/bin/nova-console
+++ b/bin/nova-console
@@ -24,6 +24,8 @@ eventlet.monkey_patch()
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -34,7 +36,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
diff --git a/bin/nova-consoleauth b/bin/nova-consoleauth
index 9a6016326..776ee9b35 100755
--- a/bin/nova-consoleauth
+++ b/bin/nova-consoleauth
@@ -24,6 +24,8 @@ eventlet.monkey_patch()
import os
import sys
+from oslo.config import cfg
+
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
@@ -33,7 +35,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
from nova.consoleauth import manager
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 33df97b55..c00578821 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -25,6 +25,8 @@ import gettext
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -39,7 +41,6 @@ from nova import config
from nova import context
from nova import db
from nova.network import rpcapi as network_rpcapi
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
diff --git a/bin/nova-manage b/bin/nova-manage
index 22549a50d..c4e9841ce 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -59,6 +59,8 @@ import netaddr
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -77,7 +79,6 @@ from nova import context
from nova import db
from nova.db import migration
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import cliutils
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import importutils
@@ -560,14 +561,31 @@ class NetworkCommands(object):
#1) Associate (set not None value given by project/host parameter)
#2) Disassociate (set None by disassociate parameter)
#3) Keep unchanged (project/host key is not added to 'net')
+ if dis_project:
+ net['project_id'] = None
+ if dis_host:
+ net['host'] = None
+
+ # The --disassociate-X are boolean options, but if the user
+ # mistakenly provides a value, it will be used as a positional argument
+ # and be erroneously interpreted as some other parameter (e.g.
+ # a project instead of host value). The safest thing to do is error-out
+ # with a message indicating that there is probably a problem with
+ # how the disassociate modifications are being used.
+ if dis_project or dis_host:
+ if project or host:
+ error_msg = "ERROR: Unexpected arguments provided. Please " \
+ "use separate commands."
+ print(error_msg)
+ sys.exit(1)
+ db.network_update(admin_context, network['id'], net)
+ return
+
if project:
net['project_id'] = project
- elif dis_project:
- net['project_id'] = None
if host:
net['host'] = host
- elif dis_host:
- net['host'] = None
+
db.network_update(admin_context, network['id'], net)
diff --git a/bin/nova-network b/bin/nova-network
index 03472371c..d0ee61ed9 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -25,6 +25,8 @@ eventlet.monkey_patch()
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -35,7 +37,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index 617e2411d..45f272ada 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -24,9 +24,10 @@ noVNC consoles. Leverages websockify.py by Joel Martin
import os
import sys
+from oslo.config import cfg
+
from nova import config
from nova.console import websocketproxy
-from nova.openstack.common import cfg
opts = [
diff --git a/bin/nova-rpc-zmq-receiver b/bin/nova-rpc-zmq-receiver
index f89b0b3ba..90b652b91 100755
--- a/bin/nova-rpc-zmq-receiver
+++ b/bin/nova-rpc-zmq-receiver
@@ -22,8 +22,6 @@ import contextlib
import os
import sys
-import zmq
-
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -32,46 +30,22 @@ POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
-from nova import config
-from nova import exception
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import impl_zmq
-from nova import utils
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
+CONF(sys.argv[1:], project='nova')
def main():
- config.parse_args(sys.argv)
logging.setup("nova")
- utils.monkey_patch()
-
- ipc_dir = CONF.rpc_zmq_ipc_dir
-
- # Create the necessary directories/files for this service.
- if not os.path.isdir(ipc_dir):
- try:
- utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
- utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
- ipc_dir, run_as_root=True)
- utils.execute('chmod', '750', ipc_dir, run_as_root=True)
- except exception.ProcessExecutionError:
- logging.error(_("Could not create IPC socket directory."))
- return
with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
- consume_in = "tcp://%s:%s" % \
- (CONF.rpc_zmq_bind_address,
- CONF.rpc_zmq_port)
- consumption_proxy = impl_zmq.InternalContext(None)
-
- reactor.register(consumption_proxy,
- consume_in, zmq.PULL, out_bind=True)
-
reactor.consume_in_thread()
reactor.wait()
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index 507ff3d5a..ec024d853 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -26,6 +26,8 @@ import gettext
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -37,7 +39,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/bin/nova-spicehtml5proxy b/bin/nova-spicehtml5proxy
index 405092942..23d250ca3 100755
--- a/bin/nova-spicehtml5proxy
+++ b/bin/nova-spicehtml5proxy
@@ -24,9 +24,10 @@ SPICE HTML5 consoles. Leverages websockify.py by Joel Martin
import os
import sys
+from oslo.config import cfg
+
from nova import config
from nova.console import websocketproxy
-from nova.openstack.common import cfg
opts = [
diff --git a/doc/README.rst b/doc/README.rst
index a85f7bc91..413428451 100644
--- a/doc/README.rst
+++ b/doc/README.rst
@@ -8,13 +8,13 @@ Included documents:
Building the docs
-----------------
-From the root nova director::
+From the root nova directory::
python setup.py build_sphinx
Building just the man pages
---------------------------
-from the root nova director::
+from the root nova directory::
python setup.py build_sphinx -b man
@@ -24,9 +24,9 @@ Installing the man pages
After building the man pages, they can be found in ``doc/build/man/``.
You can install the man page onto your system by following the following steps:
-Example for ``nova-manage``::
+Example for ``nova-scheduler``::
mkdir /usr/local/man/man1
- install -g 0 -o 0 -m 0644 doc/build/man/nova-manage.1 /usr/local/man/man1/nova-manage.1
- gzip /usr/local/man/man1/nova-manage.1
- man nova-manage
+ install -g 0 -o 0 -m 0644 doc/build/man/nova-scheduler.1 /usr/local/man/man1/nova-scheduler.1
+ gzip /usr/local/man/man1/nova-scheduler.1
+ man nova-scheduler
diff --git a/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json b/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json
new file mode 100644
index 000000000..1548aeb59
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "OS-EXT-IMG-SIZE:size": "74185822",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml b/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml
new file mode 100644
index 000000000..49fe2ee31
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="70a599e0-31e7-49b7-b260-868f441e862b" OS-EXT-IMG-SIZE:size="74185822">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+</image> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json b/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json
new file mode 100644
index 000000000..bbd9dcfb1
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json
@@ -0,0 +1,219 @@
+{
+ "images": [
+ {
+ "OS-EXT-IMG-SIZE:size": "74185822",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "58145823",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "49163826",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "26360814",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "84035174",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "83594576",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml b/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml
new file mode 100644
index 000000000..d0b5787ca
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="70a599e0-31e7-49b7-b260-868f441e862b" OS-EXT-IMG-SIZE:size="74185822">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="155d900f-4e14-4e4c-a73d-069cbf4541e6" OS-EXT-IMG-SIZE:size="25165824">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a2459075-d96c-40d5-893e-577ff92e721c" OS-EXT-IMG-SIZE:size="58145823">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="a440c04b-79fa-479c-bed1-0b816eaec379" OS-EXT-IMG-SIZE:size="49163826">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77" OS-EXT-IMG-SIZE:size="26360814">
+ <metadata>
+ <meta key="kernel_id">155d900f-4e14-4e4c-a73d-069cbf4541e6</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="cedef40a-ed67-4d10-800e-17455edce175" OS-EXT-IMG-SIZE:size="84035174">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" OS-EXT-IMG-SIZE:size="83594576">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="http://openstack.example.com/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="http://glance.openstack.example.com/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images> \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index ba5e410eb..0ad7a6498 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -25,6 +25,14 @@
"updated": "2013-01-30T00:00:00+00:00"
},
{
+ "alias": "OS-EXT-IMG-SIZE",
+ "description": "Adds image size to image listings.",
+ "links": [],
+ "name": "ImageSize",
+ "namespace": "http://docs.openstack.org/compute/ext/image_size/api/v1.1",
+ "updated": "2013-02-19T00:00:00+00:00"
+ },
+ {
"alias": "OS-EXT-IPS",
"description": "Adds type parameter to the ip list.",
"links": [],
@@ -97,6 +105,14 @@
"updated": "2012-01-12T00:00:00+00:00"
},
{
+ "alias": "os-attach-interfaces",
+ "description": "Attach interface support.",
+ "links": [],
+ "name": "AttachInterfaces",
+ "namespace": "http://docs.openstack.org/compute/ext/interfaces/api/v1.1",
+ "updated": "2012-07-22T00:00:00+00:00"
+ },
+ {
"alias": "os-availability-zone",
"description": "1. Add availability_zone to the Create Server v1.1 API.\n 2. Add availability zones describing.\n ",
"links": [],
@@ -194,11 +210,11 @@
},
{
"alias": "os-evacuate",
- "description": "Enables server evacuation",
+ "description": "Enables server evacuation.",
"links": [],
"name": "Evacuate",
"namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2",
- "updated": "2012-12-05T00:00:00+00:00"
+ "updated": "2013-01-06T00:00:00+00:00"
},
{
"alias": "os-fixed-ips",
@@ -314,7 +330,7 @@
},
{
"alias": "os-instance-actions",
- "description": "View a log of actions taken on an instance",
+ "description": "View a log of actions and events taken on an instance.",
"links": [],
"name": "InstanceActions",
"namespace": "http://docs.openstack.org/compute/ext/instance-actions/api/v1.1",
@@ -385,6 +401,14 @@
"updated": "2011-08-18T00:00:00+00:00"
},
{
+ "alias": "os-security-group-default-rules",
+ "description": "Default rules for security group support.",
+ "links": [],
+ "name": "SecurityGroupDefaultRules",
+ "namespace": "http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1",
+ "updated": "2013-02-05T00:00:00+00:00"
+ },
+ {
"alias": "os-security-groups",
"description": "Security group support.",
"links": [],
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index a18e52437..da20e0b61 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -9,6 +9,9 @@
<extension alias="OS-EXT-AZ" updated="2013-01-30T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" name="ExtendedAvailabilityZone">
<description>Extended Server Attributes support.</description>
</extension>
+ <extension alias="OS-EXT-IMG-SIZE" updated="2013-02-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/image_size/api/v1.1" name="ImageSize">
+ <description>Adds image size to image listings.</description>
+ </extension>
<extension alias="OS-EXT-IPS" updated="2013-01-06T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" name="ExtendedIps">
<description>Adds type parameter to the ip list.</description>
</extension>
@@ -40,6 +43,9 @@
<extension alias="os-aggregates" updated="2012-01-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/aggregates/api/v1.1" name="Aggregates">
<description>Admin-only aggregate administration.</description>
</extension>
+ <extension alias="os-attach-interfaces" updated="2012-07-22T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/interfaces/api/v1.1" name="AttachInterfaces">
+ <description>Attach interface support.</description>
+ </extension>
<extension alias="os-availability-zone" updated="2012-12-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone">
<description>1. Add availability_zone to the Create Server v1.1 API.
2. Add availability zones describing.
@@ -88,8 +94,8 @@
<extension alias="os-deferred-delete" updated="2011-09-01T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1" name="DeferredDelete">
<description>Instance deferred delete.</description>
</extension>
- <extension alias="os-evacuate" updated="2012-12-05T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/evacuate/api/v2" name="Evacuate">
- <description>Enables server evacuation</description>
+ <extension alias="os-evacuate" updated="2013-01-06T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/evacuate/api/v2" name="Evacuate">
+ <description>Enables server evacuation.</description>
</extension>
<extension alias="os-fixed-ips" updated="2012-10-18T13:25:27-06:00" namespace="http://docs.openstack.org/compute/ext/fixed_ips/api/v2" name="FixedIPs">
<description>Fixed IPs support.</description>
@@ -136,7 +142,7 @@
<description>Admin-only hypervisor administration.</description>
</extension>
<extension alias="os-instance-actions" updated="2013-02-08T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/instance-actions/api/v1.1" name="InstanceActions">
- <description>View a log of actions taken on an instance</description>
+ <description>View a log of actions and events taken on an instance.</description>
</extension>
<extension alias="os-instance_usage_audit_log" updated="2012-07-06T01:00:00+00:00" namespace="http://docs.openstack.org/ext/services/api/v1.1" name="OSInstanceUsageAuditLog">
<description>Admin-only Task Log Monitoring.</description>
@@ -162,6 +168,9 @@
<extension alias="os-rescue" updated="2011-08-18T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/rescue/api/v1.1" name="Rescue">
<description>Instance rescue mode.</description>
</extension>
+ <extension alias="os-security-group-default-rules" updated="2013-02-05T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1" name="SecurityGroupDefaultRules">
+ <description>Default rules for security group support.</description>
+ </extension>
<extension alias="os-security-groups" updated="2011-07-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" name="SecurityGroups">
<description>Security group support.</description>
</extension>
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json b/doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json
new file mode 100644
index 000000000..11dcf6437
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.json
@@ -0,0 +1,5 @@
+{
+ "interfaceAttachment": {
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442"
+ }
+}
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml b/doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml
new file mode 100644
index 000000000..bd3f24265
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachment>
+ <port_id>ce531f90-199f-48c0-816c-13e38010b442</port_id>
+</interfaceAttachment> \ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json b/doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json
new file mode 100644
index 000000000..309f2a1e8
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json
@@ -0,0 +1,12 @@
+{
+ "interfaceAttachment": {
+ "fixed_ips": [{
+ "ip_address": "192.168.1.1",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "port_state": "ACTIVE"
+ }
+}
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml b/doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml
new file mode 100644
index 000000000..4b3254371
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<interfaceAttachment>
+ <net_id>3cb9bc59-5699-4588-a4b1-b87f96708bc6</net_id>
+ <port_id>ce531f90-199f-48c0-816c-13e38010b442</port_id>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>f8a6e8f8-c2ec-497c-9f23-da9616de54ef</subnet_id>
+ <ip_address>192.168.1.3</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_state>ACTIVE</port_state>
+ <mac_addr>fa:16:3e:4c:2c:30</mac_addr>
+</interfaceAttachment> \ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json b/doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json
new file mode 100644
index 000000000..2c62ef9d5
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json
@@ -0,0 +1,16 @@
+{
+ "interfaceAttachments": [
+ {
+ "port_state": "ACTIVE",
+ "fixed_ips": [
+ {
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef",
+ "ip_address": "192.168.1.3"
+ }
+ ],
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "mac_addr": "fa:16:3e:4c:2c:30"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml b/doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml
new file mode 100644
index 000000000..f1bef407c
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<interfaceAttachments>
+ <interfaceAttachment>
+ <port_state>ACTIVE</port_state>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>f8a6e8f8-c2ec-497c-9f23-da9616de54ef</subnet_id>
+ <ip_address>192.168.1.3</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_id>ce531f90-199f-48c0-816c-13e38010b442</port_id>
+ <net_id>3cb9bc59-5699-4588-a4b1-b87f96708bc6</net_id>
+ <mac_addr>fa:16:3e:4c:2c:30</mac_addr>
+ </interfaceAttachment>
+</interfaceAttachments> \ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-list.xml b/doc/api_samples/os-attach-interfaces/attach-interfaces-list.xml
new file mode 100644
index 000000000..3392e2cc6
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-list.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachments>
+ <interfaceAttachment>
+ <item>
+ <port_state>ACTIVE</port_state>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>f8a6e8f8-c2ec-497c-9f23-da9616de54ef</subnet_id>
+ <ip_address>192.168.1.3</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_id>ce531f90-199f-48c0-816c-13e38010b442</port_id>
+ <net_id>3cb9bc59-5699-4588-a4b1-b87f96708bc6</net_id>
+ <mac_addr>fa:16:3e:4c:2c:30</mac_addr>
+ </item>
+ </interfaceAttachment>
+</interfaceAttachments>
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json b/doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json
new file mode 100644
index 000000000..14f5b9e1e
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json
@@ -0,0 +1,14 @@
+{
+ "interfaceAttachment": {
+ "port_state": "ACTIVE",
+ "fixed_ips": [
+ {
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef",
+ "ip_address": "192.168.1.3"
+ }
+ ],
+ "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "mac_addr": "fa:16:3e:4c:2c:30"
+ }
+}
diff --git a/doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml b/doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml
new file mode 100644
index 000000000..ce3e2fec2
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachment>
+ <port_state>ACTIVE</port_state>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>b6e47749-6bf0-4d6e-ae4b-ba6b5e238510</subnet_id>
+ <ip_address>192.168.123.131</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_id>89e64f2e-86bd-4c19-9155-4548b36fdcb2</port_id>
+ <net_id>a9efd207-2c1a-4cdd-a296-d3c7c3211302</net_id>
+ <mac_addr>fa:16:3e:a4:1c:12</mac_addr>
+</interfaceAttachment>
diff --git a/doc/api_samples/os-attach-interfaces/server-post-req.json b/doc/api_samples/os-attach-interfaces/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/server-post-req.xml b/doc/api_samples/os-attach-interfaces/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/server-post-resp.json b/doc/api_samples/os-attach-interfaces/server-post-resp.json
new file mode 100644
index 000000000..54e47aefa
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "N4Lxd6cMUXmE",
+ "id": "4e44ac84-f3ed-4219-aa2e-b3d1477f0ac3",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/4e44ac84-f3ed-4219-aa2e-b3d1477f0ac3",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/4e44ac84-f3ed-4219-aa2e-b3d1477f0ac3",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-attach-interfaces/server-post-resp.xml b/doc/api_samples/os-attach-interfaces/server-post-resp.xml
new file mode 100644
index 000000000..0efd9029d
--- /dev/null
+++ b/doc/api_samples/os-attach-interfaces/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="71f1047f-f5db-42f9-b43f-85767bcafda6" adminPass="XVCtnj5P2MnJ">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/71f1047f-f5db-42f9-b43f-85767bcafda6" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/71f1047f-f5db-42f9-b43f-85767bcafda6" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json
new file mode 100644
index 000000000..b124901ea
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json
@@ -0,0 +1,26 @@
+{
+ "hypervisors": [
+ {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": null,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1,
+ "id": 1,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "1e0d7892083548cfb347e782d3b20342",
+ "id": 2
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml
new file mode 100644
index 000000000..709f4fcd6
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_hostname="fake-mini" memory_mb_used="512" memory_mb="8192" current_workload="0" vcpus="1" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_version="1" disk_available_least="None" local_gb="1028" free_ram_mb="7680" id="1">
+ <service host="4400f556a66d44ce95dfa61e75a23aaf" id="2"/>
+ </hypervisor>
+</hypervisors> \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-list-resp.json b/doc/api_samples/os-hypervisors/hypervisors-list-resp.json
new file mode 100644
index 000000000..8d9402127
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-list-resp.json
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-list-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-list-resp.xml
new file mode 100644
index 000000000..8a784c592
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-list-resp.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini"/>
+</hypervisors> \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-search-resp.json b/doc/api_samples/os-hypervisors/hypervisors-search-resp.json
new file mode 100644
index 000000000..8d9402127
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-search-resp.json
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-search-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-search-resp.xml
new file mode 100644
index 000000000..8a784c592
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-search-resp.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini"/>
+</hypervisors> \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-servers-resp.json b/doc/api_samples/os-hypervisors/hypervisors-servers-resp.json
new file mode 100644
index 000000000..8d9402127
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-servers-resp.json
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-servers-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-servers-resp.xml
new file mode 100644
index 000000000..950a5ab11
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-servers-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini">
+ <servers/>
+ </hypervisor>
+</hypervisors> \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-show-resp.json b/doc/api_samples/os-hypervisors/hypervisors-show-resp.json
new file mode 100644
index 000000000..c0feecd02
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-show-resp.json
@@ -0,0 +1,24 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": null,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1,
+ "id": 1,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "5641188ab2964f88a21042b493585ff8",
+ "id": 2
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml
new file mode 100644
index 000000000..04feef067
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_hostname="fake-mini" memory_mb_used="512" memory_mb="8192" current_workload="0" vcpus="1" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_version="1" disk_available_least="None" local_gb="1028" free_ram_mb="7680" id="1">
+ <service host="807e1a43ceb740138eea32969c3e9fe5" id="2"/>
+</hypervisor> \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json b/doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json
new file mode 100644
index 000000000..2cfb51e70
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-statistics-resp.json
@@ -0,0 +1,16 @@
+{
+ "hypervisor_statistics": {
+ "count": 1,
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-statistics-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-statistics-resp.xml
new file mode 100644
index 000000000..5d10411e3
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-statistics-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor_statistics count="1" vcpus_used="0" local_gb_used="0" memory_mb="8192" current_workload="0" vcpus="1" running_vms="0" free_disk_gb="1028" disk_available_least="0" local_gb="1028" free_ram_mb="7680" memory_mb_used="512"/> \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json b/doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json
new file mode 100644
index 000000000..f5f9d19e7
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-uptime-resp.json
@@ -0,0 +1,7 @@
+{
+ "hypervisor": {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1,
+ "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-hypervisors/hypervisors-uptime-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-uptime-resp.xml
new file mode 100644
index 000000000..5c1b2cd7d
--- /dev/null
+++ b/doc/api_samples/os-hypervisors/hypervisors-uptime-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor uptime=" 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" id="1" hypervisor_hostname="fake-mini"/> \ No newline at end of file
diff --git a/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json
new file mode 100644
index 000000000..8b0a901c7
--- /dev/null
+++ b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json
@@ -0,0 +1,8 @@
+{
+ "security_group_default_rule": {
+ "ip_protocol": "TCP",
+ "from_port": "80",
+ "to_port": "80",
+ "cidr": "10.10.12.0/24"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml
new file mode 100644
index 000000000..7fa3af7d9
--- /dev/null
+++ b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule>
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <cidr>10.10.12.0/24</cidr>
+</security_group_default_rule> \ No newline at end of file
diff --git a/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json
new file mode 100644
index 000000000..ae6c62bfd
--- /dev/null
+++ b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json
@@ -0,0 +1,11 @@
+{
+ "security_group_default_rule": {
+ "from_port": 80,
+ "id": 1,
+ "ip_protocol": "TCP",
+ "ip_range":{
+ "cidr": "10.10.10.0/24"
+ },
+ "to_port": 80
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml
new file mode 100644
index 000000000..9e700969f
--- /dev/null
+++ b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule xmlns="http://docs.openstack.org/compute/api/v1.1" id="1">
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+</security_group_default_rule> \ No newline at end of file
diff --git a/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json
new file mode 100644
index 000000000..c083640c3
--- /dev/null
+++ b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json
@@ -0,0 +1,13 @@
+{
+ "security_group_default_rules": [
+ {
+ "from_port": 80,
+ "id": 1,
+ "ip_protocol": "TCP",
+ "ip_range": {
+ "cidr": "10.10.10.0/24"
+ },
+ "to_port": 80
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml
new file mode 100644
index 000000000..f009bf80f
--- /dev/null
+++ b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rules xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <security_group_default_rule id="1">
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+ </security_group_default_rule>
+</security_group_default_rules> \ No newline at end of file
diff --git a/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json
new file mode 100644
index 000000000..97b5259a1
--- /dev/null
+++ b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json
@@ -0,0 +1,11 @@
+{
+ "security_group_default_rule": {
+ "id": 1,
+ "from_port": 80,
+ "to_port": 80,
+ "ip_protocol": "TCP",
+ "ip_range": {
+ "cidr": "10.10.10.0/24"
+ }
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml
new file mode 100644
index 000000000..9181abd38
--- /dev/null
+++ b/doc/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule xmlns="http://docs.openstack.org/compute/api/v1.1" id="1">
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_protocol>TCP</ip_protocol>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+</security_group_default_rule> \ No newline at end of file
diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst
deleted file mode 100644
index 15d3160b7..000000000
--- a/doc/source/devref/cloudpipe.rst
+++ /dev/null
@@ -1,166 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
-.. _cloudpipe:
-
-Cloudpipe -- Per Project Vpns
-=============================
-
-Cloudpipe is a method for connecting end users to their project instances in vlan mode.
-
-
-Overview
---------
-
-The support code for cloudpipe implements admin commands (via nova-manage) to automatically create a vm for a project that allows users to vpn into the private network of their project. Access to this vpn is provided through a public port on the network host for the project. This allows users to have free access to the virtual machines in their project without exposing those machines to the public internet.
-
-
-Cloudpipe Image
----------------
-
-The cloudpipe image is basically just a linux instance with openvpn installed. It needs a simple script to grab user data from the metadata server, b64 decode it into a zip file, and run the autorun.sh script from inside the zip. The autorun script will configure and run openvpn to run using the data from nova.
-
-It is also useful to have a cron script that will periodically redownload the metadata and copy the new crl. This will keep revoked users from connecting and will disconnect any users that are connected with revoked certificates when their connection is renegotiated (every hour).
-
-
-Creating a Cloudpipe Image
---------------------------
-
-Making a cloudpipe image is relatively easy.
-
-# install openvpn on a base ubuntu image.
-# set up a server.conf.template in /etc/openvpn/
-
-.. literalinclude:: server.conf.template
- :language: bash
- :linenos:
-
-# set up.sh in /etc/openvpn/
-
-.. literalinclude:: up.sh
- :language: bash
- :linenos:
-
-# set down.sh in /etc/openvpn/
-
-.. literalinclude:: down.sh
- :language: bash
- :linenos:
-
-# download and run the payload on boot from /etc/rc.local
-
-.. literalinclude:: rc.local
- :language: bash
- :linenos:
-
-# setup /etc/network/interfaces
-
-.. literalinclude:: interfaces
- :language: bash
- :linenos:
-
-# register the image and set the image id in your flagfile::
-
- --vpn_image_id=ami-xxxxxxxx
-
-# you should set a few other flags to make vpns work properly::
-
- --use_project_ca
- --cnt_vpn_clients=5
-
-
-Cloudpipe Launch
-----------------
-
-When you use nova-manage to launch a cloudpipe for a user, it goes through the following process:
-
-#. creates a keypair called <project_id>-vpn and saves it in the keys directory
-#. creates a security group <project_id>-vpn and opens up 1194 and icmp
-#. creates a cert and private key for the vpn instance and saves it in the CA/projects/<project_id>/ directory
-#. zips up the info and puts it b64 encoded as user data
-#. launches an m1.tiny instance with the above settings using the flag-specified vpn image
-
-
-Vpn Access
-----------
-
-In vlan networking mode, the second ip in each private network is reserved for the cloudpipe instance. This gives a consistent ip to the instance so that nova-network can create forwarding rules for access from the outside world. The network for each project is given a specific high-numbered port on the public ip of the network host. This port is automatically forwarded to 1194 on the vpn instance.
-
-If specific high numbered ports do not work for your users, you can always allocate and associate a public ip to the instance, and then change the vpn_public_ip and vpn_public_port in the database. This will be turned into a nova-manage command or a flag soon.
-
-
-Certificates and Revocation
----------------------------
-
-If the use_project_ca flag is set (required to for cloudpipes to work securely), then each project has its own ca. This ca is used to sign the certificate for the vpn, and is also passed to the user for bundling images. When a certificate is revoked using nova-manage, a new Certificate Revocation List (crl) is generated. As long as cloudpipe has an updated crl, it will block revoked users from connecting to the vpn.
-
-The userdata for cloudpipe isn't currently updated when certs are revoked, so it is necessary to restart the cloudpipe instance if a user's credentials are revoked.
-
-
-Restarting Cloudpipe VPN
-------------------------
-
-You can reboot a cloudpipe vpn through the api if something goes wrong (using euca-reboot-instances for example), but if you generate a new crl, you will have to terminate it and start it again using nova-manage vpn run. The cloudpipe instance always gets the first ip in the subnet and it can take up to 10 minutes for the ip to be recovered. If you try to start the new vpn instance too soon, the instance will fail to start because of a NoMoreAddresses error. If you can't wait 10 minutes, you can manually update the ip with something like the following (use the right ip for the project)::
-
- euca-terminate-instances <instance_id>
- mysql nova -e "update fixed_ips set allocated=0, leased=0, instance_id=NULL where fixed_ip='10.0.0.2'"
-
-You also will need to terminate the dnsmasq running for the user (make sure you use the right pid file)::
-
- sudo kill `cat /var/lib/nova/br100.pid`
-
-Now you should be able to re-run the vpn::
-
- nova-manage vpn run <project_id>
-
-
-Logging into Cloudpipe VPN
---------------------------
-
-The keypair that was used to launch the cloudpipe instance should be in the keys/<project_id> folder. You can use this key to log into the cloudpipe instance for debugging purposes.
-
-
-The :mod:`nova.cloudpipe.pipelib` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.cloudpipe.pipelib
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.api.cloudpipe` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.api.cloudpipe
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.crypto` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.crypto
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst
index a366c4893..8c6e03323 100644
--- a/doc/source/devref/development.environment.rst
+++ b/doc/source/devref/development.environment.rst
@@ -21,33 +21,31 @@ Setting Up a Development Environment
This page describes how to setup a working Python development
environment that can be used in developing nova on Ubuntu, Fedora or
Mac OS X. These instructions assume you're already familiar with
-git. Refer to GettingTheCode_ for additional information.
-
-.. _GettingTheCode: http://wiki.openstack.org/GettingTheCode
+git.
Following these instructions will allow you to run the nova unit
tests. If you want to be able to run nova (i.e., launch VM instances),
you will also need to install libvirt and at least one of the
`supported hypervisors`_. Running nova is currently only supported on
-Linux, although you can run the unit tests on Mac OS X. See
-:doc:`../quickstart` for how to get a working version of OpenStack
-Compute running as quickly as possible.
+Linux, although you can run the unit tests on Mac OS X.
.. _supported hypervisors: http://wiki.openstack.org/HypervisorSupportMatrix
Virtual environments
--------------------
-Nova development uses `virtualenv <http://pypi.python.org/pypi/virtualenv>`__ to track and manage Python
-dependencies while in development and testing. This allows you to
-install all of the Python package dependencies in a virtual
-environment or "virtualenv" (a special subdirectory of your nova
-directory), instead of installing the packages at the system level.
+Nova development uses a set of shell scripts in DevStack. Virtual
+environments with venv are also available with the source code.
-.. note::
+The easiest way to build a fully functional development environment is
+with DevStack. Create a machine (such as a VM or Vagrant box) running a
+distribution supported by DevStack and install DevStack there. For
+example, there is a Vagrant script for DevStack at https://github.com/jogo/DevstackUp.
+
+ .. note::
- Virtualenv is useful for running the unit tests, but is not
- typically used for full integration testing or production usage.
-    If you prefer not to use DevStack, you can still check out source code on your local
+ machine and develop from there.
Linux Systems
-------------
@@ -146,11 +144,10 @@ basis by running::
Contributing Your Work
----------------------
-Once your work is complete you may wish to contribute it to the project. Add
-your name and email address to the ``Authors`` file, and also to the ``.mailmap``
-file if you use multiple email addresses. Your contributions can not be merged
-into trunk unless you are listed in the Authors file. Nova uses the Gerrit
-code review system. For information on how to submit your branch to Gerrit,
-see GerritWorkflow_.
+Once your work is complete you may wish to contribute it to the project.
+Refer to HowToContribute_ for information.
+Nova uses the Gerrit code review system. For information on how to submit
+your branch to Gerrit, see GerritWorkflow_.
.. _GerritWorkflow: http://wiki.openstack.org/GerritWorkflow
+.. _HowToContribute: http://wiki.openstack.org/HowToContribute
diff --git a/doc/source/devref/down.sh b/doc/source/devref/down.sh
deleted file mode 100644
index 5c1888870..000000000
--- a/doc/source/devref/down.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-BR=$1
-DEV=$2
-
-/usr/sbin/brctl delif $BR $DEV
-/sbin/ifconfig $DEV down
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index 31dcfde77..a1175ddc7 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -93,6 +93,7 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
* |AggregateTypeAffinityFilter| - limits instance_type by aggregate.
* |GroupAntiAffinityFilter| - ensures that each instance in group is on a
different host.
+* |AggregateMultiTenancyIsolation| - isolates tenants in specific aggregates.
Now we can focus on these standard filter classes in details. I will pass the
simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter| are,
@@ -350,3 +351,4 @@ in :mod:`nova.tests.scheduler`.
.. |TypeAffinityFilter| replace:: :class:`TypeAffinityFilter <nova.scheduler.filters.type_filter.TypeAffinityFilter>`
.. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter <nova.scheduler.filters.type_filter.AggregateTypeAffinityFilter>`
.. |AggregateInstanceExtraSpecsFilter| replace:: :class:`AggregateInstanceExtraSpecsFilter <nova.scheduler.filters.aggregate_instance_extra_specs.AggregateInstanceExtraSpecsFilter>`
+.. |AggregateMultiTenancyIsolation| replace:: :class:`AggregateMultiTenancyIsolation <nova.scheduler.filters.aggregate_multitenancy_isolation.AggregateMultiTenancyIsolation>`
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index 0b7883f7b..93399957e 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -41,7 +41,6 @@ Background Concepts for Nova
vmstates
il8n
filter_scheduler
- multinic
rpc
hooks
@@ -74,7 +73,6 @@ Module Reference
scheduler
fakes
nova
- cloudpipe
objectstore
glance
diff --git a/doc/source/devref/interfaces b/doc/source/devref/interfaces
deleted file mode 100644
index b7116aeb7..000000000
--- a/doc/source/devref/interfaces
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# The primary network interface
-auto eth0
-iface eth0 inet manual
- up ifconfig $IFACE 0.0.0.0 up
- down ifconfig $IFACE down
-
-auto br0
-iface br0 inet dhcp
- bridge_ports eth0
-
diff --git a/doc/source/devref/multinic.rst b/doc/source/devref/multinic.rst
deleted file mode 100644
index 43830258f..000000000
--- a/doc/source/devref/multinic.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-MultiNic
-========
-
-What is it
-----------
-
-Multinic allows an instance to have more than one vif connected to it. Each vif is representative of a separate network with its own IP block.
-
-Managers
---------
-
-Each of the network managers are designed to run independently of the compute manager. They expose a common API for the compute manager to call to determine and configure the network(s) for an instance. Direct calls to either the network api or especially the DB should be avoided by the virt layers.
-
-On startup a manager looks in the networks table for networks it is assigned and configures itself to support that network. Using the periodic task, they will claim new networks that have no host set. Only one network per network-host will be claimed at a time. This allows for psuedo-loadbalancing if there are multiple network-hosts running.
-
-Flat Manager
-------------
-
- .. image:: /images/multinic_flat.png
-
-The Flat manager is most similar to a traditional switched network environment. It assumes that the IP routing, DNS, DHCP (possibly) and bridge creation is handled by something else. That is it makes no attempt to configure any of this. It does keep track of a range of IPs for the instances that are connected to the network to be allocated.
-
-Each instance will get a fixed IP from each network's pool. The guest operating system may be configured to gather this information through an agent or by the hypervisor injecting the files, or it may ignore it completely and come up with only a layer 2 connection.
-
-Flat manager requires at least one nova-network process running that will listen to the API queue and respond to queries. It does not need to sit on any of the networks but it does keep track of the IPs it hands out to instances.
-
-FlatDHCP Manager
-----------------
-
- .. image:: /images/multinic_dhcp.png
-
-FlatDHCP manager builds on the the Flat manager adding dnsmask (DNS and DHCP) and radvd (Router Advertisement) servers on the bridge for that network. The services run on the host that is assigned to that network. The FlatDHCP manager will create its bridge as specified when the network was created on the network-host when the network host starts up or when a new network gets allocated to that host. Compute nodes will also create the bridges as necessary and connect instance VIFs to them.
-
-VLAN Manager
-------------
-
- .. image:: /images/multinic_vlan.png
-
-The VLAN manager sets up forwarding to/from a cloudpipe instance in addition to providing dnsmask (DNS and DHCP) and radvd (Router Advertisement) services for each network. The manager will create its bridge as specified when the network was created on the network-host when the network host starts up or when a new network gets allocated to that host. Compute nodes will also create the bridges as necessary and connect instance VIFs to them.
diff --git a/doc/source/devref/network.rst b/doc/source/devref/network.rst
index 45a4bd184..56e9682eb 100644
--- a/doc/source/devref/network.rst
+++ b/doc/source/devref/network.rst
@@ -18,12 +18,6 @@
Networking
==========
-.. todo::
-
- * document hardware specific commands (maybe in admin guide?) (todd)
- * document a map between flags and managers/backends (todd)
-
-
The :mod:`nova.network.manager` Module
--------------------------------------
@@ -53,76 +47,3 @@ The :mod:`network_unittest` Module
:members:
:undoc-members:
:show-inheritance:
-
-
-Legacy docs
------------
-
-The nova networking components manage private networks, public IP addressing, VPN connectivity, and firewall rules.
-
-Components
-----------
-There are several key components:
-
-* NetworkController (Manages address and vlan allocation)
-* RoutingNode (NATs public IPs to private IPs, and enforces firewall rules)
-* AddressingNode (runs DHCP services for private networks)
-* BridgingNode (a subclass of the basic nova ComputeNode)
-* TunnelingNode (provides VPN connectivity)
-
-Component Diagram
------------------
-
-Overview::
-
- (PUBLIC INTERNET)
- | \
- / \ / \
- [RoutingNode] ... [RN] [TunnelingNode] ... [TN]
- | \ / | |
- | < AMQP > | |
- [AddressingNode]-- (VLAN) ... | (VLAN)... (VLAN) --- [AddressingNode]
- \ | \ /
- / \ / \ / \ / \
- [BridgingNode] ... [BridgingNode]
-
-
- [NetworkController] ... [NetworkController]
- \ /
- < AMQP >
- |
- / \
- [CloudController]...[CloudController]
-
-While this diagram may not make this entirely clear, nodes and controllers communicate exclusively across the message bus (AMQP, currently).
-
-State Model
------------
-Network State consists of the following facts:
-
-* VLAN assignment (to a project)
-* Private Subnet assignment (to a security group) in a VLAN
-* Private IP assignments (to running instances)
-* Public IP allocations (to a project)
-* Public IP associations (to a private IP / running instance)
-
-While copies of this state exist in many places (expressed in IPTables rule chains, DHCP hosts files, etc), the controllers rely only on the distributed "fact engine" for state, queried over RPC (currently AMQP). The NetworkController inserts most records into this datastore (allocating addresses, etc) - however, individual nodes update state e.g. when running instances crash.
-
-The Public Traffic Path
------------------------
-
-Public Traffic::
-
- (PUBLIC INTERNET)
- |
- <NAT> <-- [RoutingNode]
- |
- [AddressingNode] --> |
- ( VLAN )
- | <-- [BridgingNode]
- |
- <RUNNING INSTANCE>
-
-The RoutingNode is currently implemented using IPTables rules, which implement both NATing of public IP addresses, and the appropriate firewall chains. We are also looking at using Netomata / Clusto to manage NATting within a switch or router, and/or to manage firewall rules within a hardware firewall appliance.
-
-Similarly, the AddressingNode currently manages running DNSMasq instances for DHCP services. However, we could run an internal DHCP server (using Scapy ala Clusto), or even switch to static addressing by inserting the private address into the disk image the same way we insert the SSH keys. (See compute for more details).
diff --git a/doc/source/devref/server.conf.template b/doc/source/devref/server.conf.template
deleted file mode 100644
index feee3185b..000000000
--- a/doc/source/devref/server.conf.template
+++ /dev/null
@@ -1,34 +0,0 @@
-port 1194
-proto udp
-dev tap0
-up "/etc/openvpn/up.sh br0"
-down "/etc/openvpn/down.sh br0"
-
-persist-key
-persist-tun
-
-ca ca.crt
-cert server.crt
-key server.key # This file should be kept secret
-
-dh dh1024.pem
-ifconfig-pool-persist ipp.txt
-
-server-bridge VPN_IP DHCP_SUBNET DHCP_LOWER DHCP_UPPER
-
-client-to-client
-keepalive 10 120
-comp-lzo
-
-max-clients 1
-
-user nobody
-group nogroup
-
-persist-key
-persist-tun
-
-status openvpn-status.log
-
-verb 3
-mute 20 \ No newline at end of file
diff --git a/doc/source/devref/up.sh b/doc/source/devref/up.sh
deleted file mode 100644
index 073a58e15..000000000
--- a/doc/source/devref/up.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-BR=$1
-DEV=$2
-MTU=$3
-/sbin/ifconfig $DEV mtu $MTU promisc up
-/usr/sbin/brctl addif $BR $DEV
diff --git a/doc/source/image_src/multinic_1.odg b/doc/source/image_src/multinic_1.odg
deleted file mode 100644
index bbd76b10e..000000000
--- a/doc/source/image_src/multinic_1.odg
+++ /dev/null
Binary files differ
diff --git a/doc/source/image_src/multinic_2.odg b/doc/source/image_src/multinic_2.odg
deleted file mode 100644
index 1f1e4251a..000000000
--- a/doc/source/image_src/multinic_2.odg
+++ /dev/null
Binary files differ
diff --git a/doc/source/image_src/multinic_3.odg b/doc/source/image_src/multinic_3.odg
deleted file mode 100644
index d29e16353..000000000
--- a/doc/source/image_src/multinic_3.odg
+++ /dev/null
Binary files differ
diff --git a/doc/source/images/multinic_dhcp.png b/doc/source/images/multinic_dhcp.png
deleted file mode 100644
index bce05b595..000000000
--- a/doc/source/images/multinic_dhcp.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/images/multinic_flat.png b/doc/source/images/multinic_flat.png
deleted file mode 100644
index e055e60e8..000000000
--- a/doc/source/images/multinic_flat.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/images/multinic_vlan.png b/doc/source/images/multinic_vlan.png
deleted file mode 100644
index 9b0e4fd63..000000000
--- a/doc/source/images/multinic_vlan.png
+++ /dev/null
Binary files differ
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index 61350b183..9cbb8c1a5 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1689,7 +1689,8 @@
# Options defined in nova.servicegroup.api
#
-# The driver for servicegroup service. (string value)
+# The driver for servicegroup service (valid options are: db,
+# zk, mc) (string value)
#servicegroup_driver=db
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 2d3c4ed06..5a6800f94 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -29,6 +29,7 @@
"compute_extension:admin_actions:migrate": "rule:admin_api",
"compute_extension:aggregates": "rule:admin_api",
"compute_extension:agents": "rule:admin_api",
+ "compute_extension:attach_interfaces": "",
"compute_extension:baremetal_nodes": "rule:admin_api",
"compute_extension:cells": "rule:admin_api",
"compute_extension:certificates": "",
@@ -66,6 +67,7 @@
"compute_extension:hide_server_addresses": "is_admin:False",
"compute_extension:hosts": "rule:admin_api",
"compute_extension:hypervisors": "rule:admin_api",
+ "compute_extension:image_size": "",
"compute_extension:instance_actions": "",
"compute_extension:instance_actions:events": "rule:admin_api",
"compute_extension:instance_usage_audit_log": "rule:admin_api",
@@ -78,6 +80,7 @@
"compute_extension:quotas:update": "rule:admin_api",
"compute_extension:quota_classes": "",
"compute_extension:rescue": "",
+ "compute_extension:security_group_default_rules": "rule:admin_api",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "rule:admin_api",
"compute_extension:server_password": "",
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index 6396315b9..f0d9c6fee 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -14,6 +14,7 @@ tune2fs: CommandFilter, /sbin/tune2fs, root
# nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
# nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
# nova/virt/configdrive.py: 'mount', device, mountdir
+# nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
mount: CommandFilter, /bin/mount, root
# nova/virt/disk/mount/api.py: 'umount', mapped_device
@@ -185,3 +186,12 @@ tgtadm: CommandFilter, /usr/sbin/tgtadm, root
read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
+# nova/virt/libvirt/volume.py: 'multipath' '-R'
+multipath: CommandFilter, /sbin/multipath, root
+
+# nova/virt/libvirt/utils.py:
+systool: CommandFilter, /usr/bin/systool, root
+
+# nova/virt/libvirt/volume.py:
+sginfo: CommandFilter, /usr/bin/sginfo, root
+sg_scan: CommandFilter, /usr/bin/sg_scan, root
diff --git a/nova/api/auth.py b/nova/api/auth.py
index 83388d638..04bb34066 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -18,11 +18,11 @@ Common Auth Middleware.
"""
+from oslo.config import cfg
import webob.dec
import webob.exc
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 85b87e3e5..397190fae 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -23,6 +23,7 @@ Starting point for routing EC2 requests.
import urlparse
from eventlet.green import httplib
+from oslo.config import cfg
import webob
import webob.dec
import webob.exc
@@ -34,7 +35,6 @@ from nova.api import validator
from nova.common import memorycache
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index b66b15852..b3f9bd099 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -25,6 +25,8 @@ datastore.
import base64
import time
+from oslo.config import cfg
+
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
@@ -40,7 +42,6 @@ from nova import db
from nova import exception
from nova.image import s3
from nova import network
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
@@ -213,7 +214,7 @@ class CloudController(object):
self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
- self.security_group_api = CloudSecurityGroupAPI()
+ self.security_group_api = get_cloud_security_group_api()
self.compute_api = compute.API(network_api=self.network_api,
volume_api=self.volume_api,
security_group_api=self.security_group_api)
@@ -711,8 +712,8 @@ class CloudController(object):
self.security_group_api.validate_property(group_name, 'name',
allowed)
- group_ref = self.security_group_api.create(context, group_name,
- group_description)
+ group_ref = self.security_group_api.create_security_group(
+ context, group_name, group_description)
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
@@ -1661,7 +1662,7 @@ class CloudController(object):
return {'imageId': ec2_id}
-class CloudSecurityGroupAPI(compute_api.SecurityGroupAPI):
+class EC2SecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exception.InvalidParameterValue(err=msg)
@@ -1688,3 +1689,13 @@ class CloudSecurityGroupAPI(compute_api.SecurityGroupAPI):
@staticmethod
def raise_not_found(msg):
pass
+
+
+class CloudSecurityGroupNovaAPI(compute_api.SecurityGroupAPI,
+ EC2SecurityGroupExceptions):
+ pass
+
+
+def get_cloud_security_group_api():
+ if cfg.CONF.security_group_api.lower() == 'nova':
+ return CloudSecurityGroupNovaAPI()
diff --git a/nova/api/ec2/faults.py b/nova/api/ec2/faults.py
index 9299a946c..f13f26a77 100644
--- a/nova/api/ec2/faults.py
+++ b/nova/api/ec2/faults.py
@@ -12,11 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
import webob.dec
import webob.exc
from nova import context
-from nova.openstack.common import cfg
from nova import utils
CONF = cfg.CONF
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index fca580b29..41f36541c 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -23,13 +23,14 @@ import json
import os
import posixpath
+from oslo.config import cfg
+
from nova.api.ec2 import ec2utils
from nova.api.metadata import password
from nova import block_device
from nova import conductor
from nova import context
from nova import network
-from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova.virt import netutils
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index cedb27370..7cccbc324 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -21,6 +21,7 @@ import hashlib
import hmac
import os
+from oslo.config import cfg
import webob.dec
import webob.exc
@@ -28,7 +29,6 @@ from nova.api.metadata import base
from nova.common import memorycache
from nova import conductor
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import wsgi
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 24276b2bd..979aa74b6 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -17,12 +17,12 @@
import os
+from oslo.config import cfg
import webob.dec
import webob.exc
from nova.api.openstack import wsgi
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import wsgi as base_wsgi
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index b2583588d..8b880720b 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -20,8 +20,8 @@ import os
import re
import urlparse
+from oslo.config import cfg
import webob
-from xml.dom import minidom
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
@@ -29,9 +29,9 @@ from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import quota
+from nova import utils
osapi_opts = [
cfg.IntOpt('osapi_max_limit',
@@ -356,7 +356,7 @@ def raise_http_conflict_for_instance_invalid_state(exc, action):
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
- dom = minidom.parseString(text)
+ dom = utils.safe_minidom_parse_string(text)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
@@ -364,7 +364,7 @@ class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
- dom = minidom.parseString(text)
+ dom = utils.safe_minidom_parse_string(text)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
@@ -382,7 +382,7 @@ class MetadataXMLDeserializer(wsgi.XMLDeserializer):
return metadata
def _extract_metadata_container(self, datastring):
- dom = minidom.parseString(datastring)
+ dom = utils.safe_minidom_parse_string(datastring)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
@@ -394,7 +394,7 @@ class MetadataXMLDeserializer(wsgi.XMLDeserializer):
return self._extract_metadata_container(datastring)
def update(self, datastring):
- dom = minidom.parseString(datastring)
+ dom = utils.safe_minidom_parse_string(datastring)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
diff --git a/nova/api/openstack/compute/__init__.py b/nova/api/openstack/compute/__init__.py
index 92c84c13f..6b05dcf5f 100644
--- a/nova/api/openstack/compute/__init__.py
+++ b/nova/api/openstack/compute/__init__.py
@@ -20,6 +20,8 @@
WSGI middleware for OpenStack Compute API.
"""
+from oslo.config import cfg
+
import nova.api.openstack
from nova.api.openstack.compute import consoles
from nova.api.openstack.compute import extensions
@@ -31,10 +33,8 @@ from nova.api.openstack.compute import limits
from nova.api.openstack.compute import server_metadata
from nova.api.openstack.compute import servers
from nova.api.openstack.compute import versions
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
allow_instance_snapshots_opt = cfg.BoolOpt('allow_instance_snapshots',
diff --git a/nova/api/openstack/compute/contrib/__init__.py b/nova/api/openstack/compute/contrib/__init__.py
index 7b21a0be8..8e1e8aa85 100644
--- a/nova/api/openstack/compute/contrib/__init__.py
+++ b/nova/api/openstack/compute/contrib/__init__.py
@@ -21,8 +21,9 @@ It can't be called 'extensions' because that causes namespacing problems.
"""
+from oslo.config import cfg
+
from nova.api.openstack import extensions
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
ext_opts = [
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index dc3ee8fc4..a3f68e828 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -285,8 +285,12 @@ class AdminActionsController(wsgi.Controller):
except exception.ComputeServiceUnavailable as ex:
raise exc.HTTPBadRequest(explanation=str(ex))
except Exception:
- msg = _("Live migration of instance %(id)s to host %(host)s"
- " failed") % locals()
+ if host is None:
+ msg = _("Live migration of instance %(id)s to another host"
+ " failed") % locals()
+ else:
+ msg = _("Live migration of instance %(id)s to host %(host)s"
+ " failed") % locals()
LOG.exception(msg)
# Return messages from scheduler
raise exc.HTTPBadRequest(explanation=msg)
diff --git a/nova/api/openstack/compute/contrib/attach_interfaces.py b/nova/api/openstack/compute/contrib/attach_interfaces.py
new file mode 100644
index 000000000..a838354d0
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/attach_interfaces.py
@@ -0,0 +1,192 @@
+# Copyright 2012 SINA Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The instance interfaces extension."""
+
+import webob
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova import compute
+from nova import exception
+from nova import network
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'attach_interfaces')
+
+
+def _translate_interface_attachment_view(port_info):
+ """Maps keys for interface attachment details view."""
+ return {
+ 'net_id': port_info['network_id'],
+ 'port_id': port_info['id'],
+ 'mac_addr': port_info['mac_address'],
+ 'port_state': port_info['status'],
+ 'fixed_ips': port_info.get('fixed_ips', None),
+ }
+
+
+class InterfaceAttachmentController(object):
+ """The interface attachment API controller for the OpenStack API."""
+
+ def __init__(self):
+ self.compute_api = compute.API()
+ self.network_api = network.API()
+ super(InterfaceAttachmentController, self).__init__()
+
+ def index(self, req, server_id):
+ """Returns the list of interface attachments for a given instance."""
+ return self._items(req, server_id,
+ entity_maker=_translate_interface_attachment_view)
+
+ def show(self, req, server_id, id):
+ """Return data about the given interface attachment."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ port_id = id
+ try:
+ instance = self.compute_api.get(context, server_id)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+ try:
+ port_info = self.network_api.show_port(context, port_id)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+ if port_info['port']['device_id'] != server_id:
+ raise exc.HTTPNotFound()
+
+ return {'interfaceAttachment': _translate_interface_attachment_view(
+ port_info['port'])}
+
+ def create(self, req, server_id, body):
+ """Attach an interface to an instance."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ network_id = None
+ port_id = None
+ req_ip = None
+ if body:
+ attachment = body['interfaceAttachment']
+ network_id = attachment.get('net_id', None)
+ port_id = attachment.get('port_id', None)
+ try:
+ req_ip = attachment['fixed_ips'][0]['ip_address']
+ except Exception:
+ pass
+
+ if network_id and port_id:
+ raise exc.HTTPBadRequest()
+ if req_ip and not network_id:
+ raise exc.HTTPBadRequest()
+
+ try:
+ instance = self.compute_api.get(context, server_id)
+ LOG.audit(_("Attach interface"), instance=instance)
+ network_info = self.compute_api.attach_interface(context,
+ instance, network_id, port_id, req_ip)
+ except exception.NotFound, e:
+ LOG.exception(e)
+ raise exc.HTTPNotFound()
+ except NotImplementedError:
+ msg = _("Network driver does not support this function.")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
+ except exception.InterfaceAttachFailed, e:
+ LOG.exception(e)
+ msg = _("Failed to attach interface")
+ raise webob.exc.HTTPInternalServerError(explanation=msg)
+
+ network, mapping = network_info
+ return self.show(req, server_id, mapping['vif_uuid'])
+
+ def update(self, req, server_id, id, body):
+        """Update an interface attachment. We don't currently support this."""
+ msg = _("Attachments update is not supported")
+ raise exc.HTTPNotImplemented(explanation=msg)
+
+ def delete(self, req, server_id, id):
+ """Detach an interface from an instance."""
+ context = req.environ['nova.context']
+ authorize(context)
+ port_id = id
+
+ try:
+ instance = self.compute_api.get(context, server_id)
+ LOG.audit(_("Detach interface %s"), port_id, instance=instance)
+
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+ try:
+ self.compute_api.detach_interface(context,
+ instance, port_id=port_id)
+ except exception.PortNotFound:
+ raise exc.HTTPNotFound
+ except NotImplementedError:
+ msg = _("Network driver does not support this function.")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
+
+ return webob.Response(status_int=202)
+
+ def _items(self, req, server_id, entity_maker):
+ """Returns a list of attachments, transformed through entity_maker."""
+ context = req.environ['nova.context']
+ authorize(context)
+
+ try:
+ instance = self.compute_api.get(context, server_id)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+
+ results = []
+ search_opts = {'device_id': instance['uuid']}
+
+ try:
+ data = self.network_api.list_ports(context, **search_opts)
+ except exception.NotFound:
+ raise exc.HTTPNotFound()
+ except NotImplementedError:
+ msg = _("Network driver does not support this function.")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
+
+ ports = data.get('ports', [])
+ results = [entity_maker(port) for port in ports]
+
+ return {'interfaceAttachments': results}
+
+
+class Attach_interfaces(extensions.ExtensionDescriptor):
+ """Attach interface support."""
+
+ name = "AttachInterfaces"
+ alias = "os-attach-interfaces"
+ namespace = "http://docs.openstack.org/compute/ext/interfaces/api/v1.1"
+ updated = "2012-07-22T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ res = extensions.ResourceExtension('os-interface',
+ InterfaceAttachmentController(),
+ parent=dict(
+ member_name='server',
+ collection_name='servers'))
+ resources.append(res)
+
+ return resources
diff --git a/nova/api/openstack/compute/contrib/availability_zone.py b/nova/api/openstack/compute/contrib/availability_zone.py
index 98c508bd7..c79556839 100644
--- a/nova/api/openstack/compute/contrib/availability_zone.py
+++ b/nova/api/openstack/compute/contrib/availability_zone.py
@@ -14,17 +14,17 @@
# License for the specific language governing permissions and limitations
# under the License
+from oslo.config import cfg
+
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import availability_zones
from nova import db
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import servicegroup
-
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py
index 03e2e4ca2..8e31777e2 100644
--- a/nova/api/openstack/compute/contrib/cells.py
+++ b/nova/api/openstack/compute/contrib/cells.py
@@ -16,10 +16,10 @@
# under the License.
"""The cells extension."""
-from xml.dom import minidom
-from xml.parsers import expat
+from oslo.config import cfg
from webob import exc
+from xml.parsers import expat
from nova.api.openstack import common
from nova.api.openstack import extensions
@@ -29,9 +29,9 @@ from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
+from nova import utils
LOG = logging.getLogger(__name__)
@@ -99,7 +99,7 @@ class CellDeserializer(wsgi.XMLDeserializer):
def default(self, string):
"""Deserialize an xml-formatted cell create request."""
try:
- node = minidom.parseString(string)
+ node = utils.safe_minidom_parse_string(string)
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index 4e224be46..20ae87fe1 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -14,6 +14,8 @@
"""Connect your vlan to the world."""
+from oslo.config import cfg
+
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
@@ -24,7 +26,6 @@ from nova.compute import vm_states
from nova import db
from nova import exception
from nova import network
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/api/openstack/compute/contrib/floating_ips_bulk.py b/nova/api/openstack/compute/contrib/floating_ips_bulk.py
index 11ab0ec69..afd42e12f 100644
--- a/nova/api/openstack/compute/contrib/floating_ips_bulk.py
+++ b/nova/api/openstack/compute/contrib/floating_ips_bulk.py
@@ -16,12 +16,12 @@
# under the License.
import netaddr
+from oslo.config import cfg
import webob.exc
from nova.api.openstack import extensions
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
CONF = cfg.CONF
diff --git a/nova/api/openstack/compute/contrib/fping.py b/nova/api/openstack/compute/contrib/fping.py
index 282be19ca..394d5924e 100644
--- a/nova/api/openstack/compute/contrib/fping.py
+++ b/nova/api/openstack/compute/contrib/fping.py
@@ -19,17 +19,16 @@
import itertools
import os
+from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova import compute
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'fping')
authorize_all_tenants = extensions.extension_authorizer(
diff --git a/nova/api/openstack/compute/contrib/hide_server_addresses.py b/nova/api/openstack/compute/contrib/hide_server_addresses.py
index bb8ee553a..4d8bcd94e 100644
--- a/nova/api/openstack/compute/contrib/hide_server_addresses.py
+++ b/nova/api/openstack/compute/contrib/hide_server_addresses.py
@@ -15,11 +15,11 @@
"""Extension for hiding server addresses in certain states."""
+from oslo.config import cfg
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import vm_states
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
opts = [
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 9ce278900..a3cfd229c 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -16,7 +16,6 @@
"""The hosts admin extension."""
import webob.exc
-from xml.dom import minidom
from xml.parsers import expat
from nova.api.openstack import extensions
@@ -25,6 +24,7 @@ from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
+from nova import utils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'hosts')
@@ -72,7 +72,7 @@ class HostShowTemplate(xmlutil.TemplateBuilder):
class HostUpdateDeserializer(wsgi.XMLDeserializer):
def default(self, string):
try:
- node = minidom.parseString(string)
+ node = utils.safe_minidom_parse_string(string)
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
diff --git a/nova/api/openstack/compute/contrib/image_size.py b/nova/api/openstack/compute/contrib/image_size.py
new file mode 100644
index 000000000..21998738f
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/image_size.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+
+authorize = extensions.soft_extension_authorizer('compute', 'image_size')
+
+
+def make_image(elem):
+ elem.set('{%s}size' % Image_size.namespace, '%s:size' % Image_size.alias)
+
+
+class ImagesSizeTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('images')
+ elem = xmlutil.SubTemplateElement(root, 'image', selector='images')
+ make_image(elem)
+ return xmlutil.SlaveTemplate(root, 1, nsmap={
+ Image_size.alias: Image_size.namespace})
+
+
+class ImageSizeTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('image', selector='image')
+ make_image(root)
+ return xmlutil.SlaveTemplate(root, 1, nsmap={
+ Image_size.alias: Image_size.namespace})
+
+
+class ImageSizeController(wsgi.Controller):
+
+ def _extend_image(self, image, image_cache):
+ key = "%s:size" % Image_size.alias
+ image[key] = image_cache['size']
+
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+ context = req.environ["nova.context"]
+ if authorize(context):
+ # Attach our slave template to the response object
+ resp_obj.attach(xml=ImageSizeTemplate())
+ image_resp = resp_obj.obj['image']
+ # image guaranteed to be in the cache due to the core API adding
+ # it in its 'show' method
+ image_cached = req.get_db_item('images', image_resp['id'])
+ self._extend_image(image_resp, image_cached)
+
+ @wsgi.extends
+ def detail(self, req, resp_obj):
+ context = req.environ['nova.context']
+ if authorize(context):
+ # Attach our slave template to the response object
+ resp_obj.attach(xml=ImagesSizeTemplate())
+ images_resp = list(resp_obj.obj['images'])
+ # images guaranteed to be in the cache due to the core API adding
+ # it in its 'detail' method
+ for image in images_resp:
+ image_cached = req.get_db_item('images', image['id'])
+ self._extend_image(image, image_cached)
+
+
+class Image_size(extensions.ExtensionDescriptor):
+ """Adds image size to image listings."""
+
+ name = "ImageSize"
+ alias = "OS-EXT-IMG-SIZE"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "image_size/api/v1.1")
+ updated = "2013-02-19T00:00:00+00:00"
+
+ def get_controller_extensions(self):
+ controller = ImageSizeController()
+ extension = extensions.ControllerExtension(self, 'images', controller)
+ return [extension]
diff --git a/nova/api/openstack/compute/contrib/instance_actions.py b/nova/api/openstack/compute/contrib/instance_actions.py
index 4ab32ad4c..ecacde7bf 100644
--- a/nova/api/openstack/compute/contrib/instance_actions.py
+++ b/nova/api/openstack/compute/contrib/instance_actions.py
@@ -71,15 +71,13 @@ class InstanceActionsController(wsgi.Controller):
def _format_action(self, action_raw):
action = {}
for key in ACTION_KEYS:
- if key in action_raw:
- action[key] = action_raw[key]
+ action[key] = action_raw.get(key)
return action
def _format_event(self, event_raw):
event = {}
for key in EVENT_KEYS:
- if key in event_raw:
- event[key] = event_raw[key]
+ event[key] = event_raw.get(key)
return event
@wsgi.serializers(xml=InstanceActionsTemplate)
diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
index 512b58869..d6ffe3677 100644
--- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
+++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
@@ -18,11 +18,11 @@
import datetime
+from oslo.config import cfg
import webob.exc
from nova.api.openstack import extensions
from nova import compute
-from nova.openstack.common import cfg
from nova import utils
CONF = cfg.CONF
diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py
index d1d172686..bef2c3d1f 100644
--- a/nova/api/openstack/compute/contrib/os_networks.py
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -46,7 +46,7 @@ def network_dict(context, network):
# are only visible if they are an admin.
if context.is_admin:
fields += admin_fields
- result = dict((field, network[field]) for field in fields)
+ result = dict((field, network.get(field)) for field in fields)
if 'uuid' in network:
result['id'] = network['uuid']
return result
diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py
index 03178ab65..c22e64b1f 100644
--- a/nova/api/openstack/compute/contrib/os_tenant_networks.py
+++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py
@@ -18,13 +18,13 @@
import netaddr
import netaddr.core as netexc
+from oslo.config import cfg
from webob import exc
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
import nova.network
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import quota
@@ -64,9 +64,9 @@ authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
def network_dict(network):
- return {"id": network.get("uuid") or network["id"],
- "cidr": network["cidr"],
- "label": network["label"]}
+ return {"id": network.get("uuid") or network.get("id"),
+ "cidr": network.get("cidr"),
+ "label": network.get("label")}
class NetworkController(object):
@@ -108,7 +108,7 @@ class NetworkController(object):
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
- return network_dict(network)
+ return {'network': network_dict(network)}
def delete(self, req, id):
context = req.environ['nova.context']
diff --git a/nova/api/openstack/compute/contrib/quota_classes.py b/nova/api/openstack/compute/contrib/quota_classes.py
index f3f5b9b08..7b94e45b1 100644
--- a/nova/api/openstack/compute/contrib/quota_classes.py
+++ b/nova/api/openstack/compute/contrib/quota_classes.py
@@ -18,6 +18,7 @@ import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+import nova.context
from nova import db
from nova import exception
from nova import quota
@@ -59,7 +60,7 @@ class QuotaClassSetsController(object):
context = req.environ['nova.context']
authorize(context)
try:
- db.sqlalchemy.api.authorize_quota_class_context(context, id)
+ nova.context.authorize_quota_class_context(context, id)
return self._format_quota_set(id,
QUOTAS.get_class_quotas(context, id))
except exception.NotAuthorized:
diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py
index 728c3fad6..b1a461431 100644
--- a/nova/api/openstack/compute/contrib/quotas.py
+++ b/nova/api/openstack/compute/contrib/quotas.py
@@ -20,8 +20,8 @@ import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+import nova.context
from nova import db
-from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova.openstack.common import log as logging
from nova import quota
@@ -78,7 +78,7 @@ class QuotaSetsController(object):
context = req.environ['nova.context']
authorize_show(context)
try:
- sqlalchemy_api.authorize_project_context(context, id)
+ nova.context.authorize_project_context(context, id)
return self._format_quota_set(id, self._get_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
diff --git a/nova/api/openstack/compute/contrib/rescue.py b/nova/api/openstack/compute/contrib/rescue.py
index 29135418e..c62ed5894 100644
--- a/nova/api/openstack/compute/contrib/rescue.py
+++ b/nova/api/openstack/compute/contrib/rescue.py
@@ -14,6 +14,7 @@
"""The rescue mode extension."""
+from oslo.config import cfg
import webob
from webob import exc
@@ -22,7 +23,6 @@ from nova.api.openstack import extensions as exts
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/api/openstack/compute/contrib/security_group_default_rules.py b/nova/api/openstack/compute/contrib/security_group_default_rules.py
new file mode 100644
index 000000000..e2bba8127
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/security_group_default_rules.py
@@ -0,0 +1,216 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Metacloud Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from xml.dom import minidom
+
+import webob
+from webob import exc
+
+from nova.api.openstack.compute.contrib import security_groups as sg
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import exception
+from nova.network.security_group import openstack_driver
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute',
+ 'security_group_default_rules')
+
+sg_nsmap = {None: wsgi.XMLNS_V11}
+
+
+def make_default_rule(elem):
+ elem.set('id')
+
+ proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
+ proto.text = 'ip_protocol'
+
+ from_port = xmlutil.SubTemplateElement(elem, 'from_port')
+ from_port.text = 'from_port'
+
+ to_port = xmlutil.SubTemplateElement(elem, 'to_port')
+ to_port.text = 'to_port'
+
+ ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
+ selector='ip_range')
+ cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
+ cidr.text = 'cidr'
+
+
+class SecurityGroupDefaultRulesTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('security_group_default_rules')
+ elem = xmlutil.SubTemplateElement(root, 'security_group_default_rule',
+ selector='security_group_default_rules')
+
+ make_default_rule(elem)
+ return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
+
+
+class SecurityGroupDefaultRuleTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('security_group_default_rule',
+ selector='security_group_default_rule')
+ make_default_rule(root)
+ return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
+
+
+class SecurityGroupDefaultRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
+ def default(self, string):
+ dom = minidom.parseString(string)
+ security_group_rule = self._extract_security_group_default_rule(dom)
+ return {'body': {'security_group_default_rule': security_group_rule}}
+
+ def _extract_security_group_default_rule(self, node):
+ sg_rule = {}
+ sg_rule_node = self.find_first_child_named(node,
+ 'security_group_default_rule')
+ if sg_rule_node is not None:
+ ip_protocol_node = self.find_first_child_named(sg_rule_node,
+ "ip_protocol")
+ if ip_protocol_node is not None:
+ sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)
+
+ from_port_node = self.find_first_child_named(sg_rule_node,
+ "from_port")
+ if from_port_node is not None:
+ sg_rule['from_port'] = self.extract_text(from_port_node)
+
+ to_port_node = self.find_first_child_named(sg_rule_node, "to_port")
+ if to_port_node is not None:
+ sg_rule['to_port'] = self.extract_text(to_port_node)
+
+ cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
+ if cidr_node is not None:
+ sg_rule['cidr'] = self.extract_text(cidr_node)
+
+ return sg_rule
+
+
+class SecurityGroupDefaultRulesController(sg.SecurityGroupControllerBase):
+
+ def __init__(self):
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
+
+ @wsgi.serializers(xml=SecurityGroupDefaultRuleTemplate)
+ @wsgi.deserializers(xml=SecurityGroupDefaultRulesXMLDeserializer)
+ def create(self, req, body):
+ context = self._authorize_context(req)
+ authorize(context)
+
+ sg_rule = self._from_body(body, 'security_group_default_rule')
+
+ try:
+ values = self._rule_args_to_dict(to_port=sg_rule.get('to_port'),
+ from_port=sg_rule.get('from_port'),
+ ip_protocol=sg_rule.get('ip_protocol'),
+ cidr=sg_rule.get('cidr'))
+ except Exception as exp:
+ raise exc.HTTPBadRequest(explanation=unicode(exp))
+
+ if values is None:
+ msg = _('Not enough parameters to build a valid rule.')
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ if self.security_group_api.default_rule_exists(context, values):
+ msg = _('This default rule already exists.')
+ raise exc.HTTPBadRequest(explanation=msg)
+ security_group_rule = self.security_group_api.add_default_rules(
+ context, [values])[0]
+ fmt_rule = self._format_security_group_default_rule(
+ security_group_rule)
+ return {'security_group_default_rule': fmt_rule}
+
+ def _rule_args_to_dict(self, to_port=None, from_port=None,
+ ip_protocol=None, cidr=None):
+ cidr = self.security_group_api.parse_cidr(cidr)
+ return self.security_group_api.new_cidr_ingress_rule(
+ cidr, ip_protocol, from_port, to_port)
+
+ @wsgi.serializers(xml=SecurityGroupDefaultRuleTemplate)
+ def show(self, req, id):
+ context = self._authorize_context(req)
+ authorize(context)
+
+ id = self.security_group_api.validate_id(id)
+
+ LOG.debug(_("Showing security_group_default_rule with id %s") % id)
+ try:
+ rule = self.security_group_api.get_default_rule(context, id)
+ except exception.SecurityGroupDefaultRuleNotFound:
+ raise exc.HTTPNotFound(_("security group default rule not found"))
+
+ fmt_rule = self._format_security_group_default_rule(rule)
+ return {"security_group_default_rule": fmt_rule}
+
+ def delete(self, req, id):
+ context = self._authorize_context(req)
+ authorize(context)
+
+ id = self.security_group_api.validate_id(id)
+
+ rule = self.security_group_api.get_default_rule(context, id)
+
+ self.security_group_api.remove_default_rules(context, [rule['id']])
+
+ return webob.Response(status_int=204)
+
+ @wsgi.serializers(xml=SecurityGroupDefaultRulesTemplate)
+ def index(self, req):
+
+ context = self._authorize_context(req)
+ authorize(context)
+
+ ret = {'security_group_default_rules': []}
+ for rule in self.security_group_api.get_all_default_rules(context):
+ rule_fmt = self._format_security_group_default_rule(rule)
+ ret['security_group_default_rules'].append(rule_fmt)
+
+ return ret
+
+ def _format_security_group_default_rule(self, rule):
+ sg_rule = {}
+ sg_rule['id'] = rule['id']
+ sg_rule['ip_protocol'] = rule['protocol']
+ sg_rule['from_port'] = rule['from_port']
+ sg_rule['to_port'] = rule['to_port']
+ sg_rule['ip_range'] = {}
+ sg_rule['ip_range'] = {'cidr': rule['cidr']}
+ return sg_rule
+
+
+class Security_group_default_rules(extensions.ExtensionDescriptor):
+ """Default rules for security group support."""
+ name = "SecurityGroupDefaultRules"
+ alias = "os-security-group-default-rules"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "securitygroupdefaultrules/api/v1.1")
+ updated = "2013-02-05T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = [
+ extensions.ResourceExtension('os-security-group-default-rules',
+ SecurityGroupDefaultRulesController(),
+ collection_actions={'create': 'POST',
+ 'delete': 'DELETE',
+ 'index': 'GET'},
+ member_actions={'show': 'GET'})]
+
+ return resources
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index c49e7af70..092e89b9b 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -16,10 +16,10 @@
"""The security groups extension."""
-from xml.dom import minidom
-
+import json
import webob
from webob import exc
+from xml.dom import minidom
from nova.api.openstack import common
from nova.api.openstack import extensions
@@ -29,9 +29,13 @@ from nova import compute
from nova.compute import api as compute_api
from nova import db
from nova import exception
+from nova.network.security_group import openstack_driver
+from nova.network.security_group import quantum_driver
from nova.openstack.common import log as logging
+from nova import utils
from nova.virt import netutils
+
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
@@ -109,7 +113,7 @@ class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
- dom = minidom.parseString(string)
+ dom = utils.safe_minidom_parse_string(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
'security_group')
@@ -130,7 +134,7 @@ class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
- dom = minidom.parseString(string)
+ dom = utils.safe_minidom_parse_string(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
@@ -176,7 +180,8 @@ class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
- self.security_group_api = NativeSecurityGroupAPI()
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
@@ -192,8 +197,8 @@ class SecurityGroupControllerBase(object):
if rule['group_id']:
source_group = self.security_group_api.get(context,
id=rule['group_id'])
- sg_rule['group'] = {'name': source_group.name,
- 'tenant_id': source_group.project_id}
+ sg_rule['group'] = {'name': source_group.get('name'),
+ 'tenant_id': source_group.get('project_id')}
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
@@ -215,13 +220,6 @@ class SecurityGroupControllerBase(object):
authorize(context)
return context
- def _validate_id(self, id):
- try:
- return int(id)
- except ValueError:
- msg = _("Security group id should be integer")
- raise exc.HTTPBadRequest(explanation=msg)
-
def _from_body(self, body, key):
if not body:
raise exc.HTTPUnprocessableEntity()
@@ -239,7 +237,7 @@ class SecurityGroupController(SecurityGroupControllerBase):
"""Return data about the given security group."""
context = self._authorize_context(req)
- id = self._validate_id(id)
+ id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
@@ -251,7 +249,7 @@ class SecurityGroupController(SecurityGroupControllerBase):
"""Delete a security group."""
context = self._authorize_context(req)
- id = self._validate_id(id)
+ id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
@@ -274,7 +272,7 @@ class SecurityGroupController(SecurityGroupControllerBase):
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
- for group in limited_list]
+ for group in limited_list]
return {'security_groups':
list(sorted(result,
@@ -295,11 +293,11 @@ class SecurityGroupController(SecurityGroupControllerBase):
self.security_group_api.validate_property(group_description,
'description', None)
- group_ref = self.security_group_api.create(context, group_name,
- group_description)
+ group_ref = self.security_group_api.create_security_group(
+ context, group_name, group_description)
return {'security_group': self._format_security_group(context,
- group_ref)}
+ group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
@@ -311,14 +309,13 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
sg_rule = self._from_body(body, 'security_group_rule')
- parent_group_id = self._validate_id(sg_rule.get('parent_group_id',
- None))
+ parent_group_id = self.security_group_api.validate_id(
+ sg_rule.get('parent_group_id', None))
security_group = self.security_group_api.get(context, None,
parent_group_id, map_exception=True)
-
try:
- values = self._rule_args_to_dict(context,
+ new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
@@ -327,24 +324,21 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
except Exception as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
- if values is None:
+ if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
- values['parent_group_id'] = security_group.id
+ new_rule['parent_group_id'] = security_group['id']
- if 'cidr' in values:
- net, prefixlen = netutils.get_net_and_prefixlen(values['cidr'])
+ if 'cidr' in new_rule:
+ net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net != '0.0.0.0' and prefixlen == '0':
- msg = _("Bad prefix for network in cidr %s") % values['cidr']
+ msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
- if self.security_group_api.rule_exists(security_group, values):
- msg = _('This rule already exists in group %s') % parent_group_id
- raise exc.HTTPBadRequest(explanation=msg)
-
- security_group_rule = self.security_group_api.add_rules(
- context, parent_group_id, security_group['name'], [values])[0]
+ security_group_rule = (
+ self.security_group_api.create_security_group_rule(
+ context, security_group, new_rule))
return {"security_group_rule": self._format_security_group_rule(
context,
@@ -354,8 +348,9 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
- group_id = self._validate_id(group_id)
- #check if groupId exists
+ group_id = self.security_group_api.validate_id(group_id)
+
+ # check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
@@ -367,11 +362,11 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
def delete(self, req, id):
context = self._authorize_context(req)
- id = self._validate_id(id)
+ id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
- group_id = rule.parent_group_id
+ group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None, group_id,
map_exception=True)
@@ -409,7 +404,8 @@ class ServerSecurityGroupController(SecurityGroupControllerBase):
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
- self.security_group_api = NativeSecurityGroupAPI()
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
@@ -468,14 +464,51 @@ class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
def _extend_servers(self, req, servers):
key = "security_groups"
- for server in servers:
- instance = req.get_db_instance(server['id'])
- groups = instance.get(key)
- if groups:
- server[key] = [{"name": group["name"]} for group in groups]
+ if not openstack_driver.is_quantum_security_groups():
+ for server in servers:
+ instance = req.get_db_instance(server['id'])
+ groups = instance.get(key)
+ if groups:
+ server[key] = [{"name": group["name"]} for group in groups]
+ else:
+ # If method is a POST we get the security groups intended for an
+ # instance from the request. The reason for this is if using
+ # quantum security groups the requested security groups for the
+ # instance are not in the db and have not been sent to quantum yet.
+ instance_sgs = []
+ if req.method != 'POST':
+ for server in servers:
+ instance_sgs = (
+ self.security_group_api.get_instance_security_groups(
+ req, server['id']))
+ else:
+ try:
+ # try converting to json
+ req_obj = json.loads(req.body)
+ # Add security group to server, if no security group was in
+ # request add default since that is the group it is part of
+ instance_sgs = req_obj['server'].get(
+ key, [{'name': 'default'}])
+ except ValueError:
+ root = minidom.parseString(req.body)
+ sg_root = root.getElementsByTagName(key)
+ if sg_root:
+ security_groups = sg_root[0].getElementsByTagName(
+ 'security_group')
+ for security_group in security_groups:
+ instance_sgs.append(
+ {'name': security_group.getAttribute('name')})
+ if not instance_sgs:
+ instance_sgs = [{'name': 'default'}]
+
+ if instance_sgs:
+ for server in servers:
+ server[key] = instance_sgs
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
@@ -563,7 +596,7 @@ class Security_groups(extensions.ExtensionDescriptor):
return resources
-class NativeSecurityGroupAPI(compute_api.SecurityGroupAPI):
+class NativeSecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exc.HTTPBadRequest(explanation=msg)
@@ -587,3 +620,13 @@ class NativeSecurityGroupAPI(compute_api.SecurityGroupAPI):
@staticmethod
def raise_not_found(msg):
raise exc.HTTPNotFound(explanation=msg)
+
+
+class NativeNovaSecurityGroupAPI(compute_api.SecurityGroupAPI,
+ NativeSecurityGroupExceptions):
+ pass
+
+
+class NativeQuantumSecurityGroupAPI(quantum_driver.SecurityGroupAPI,
+ NativeSecurityGroupExceptions):
+ pass
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
index 2786ad814..0f91bf13f 100644
--- a/nova/api/openstack/compute/contrib/services.py
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-
+from oslo.config import cfg
import webob.exc
from nova.api.openstack import extensions
@@ -24,12 +24,10 @@ from nova.api.openstack import xmlutil
from nova import availability_zones
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
-
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'services')
CONF = cfg.CONF
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 0ab93db35..760dc953a 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -17,7 +17,6 @@
import webob
from webob import exc
-from xml.dom import minidom
from nova.api.openstack import common
from nova.api.openstack import extensions
@@ -155,7 +154,7 @@ class CreateDeserializer(CommonDeserializer):
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
- dom = minidom.parseString(string)
+ dom = utils.safe_minidom_parse_string(string)
vol = self._extract_volume(dom)
return {'body': {'volume': vol}}
diff --git a/nova/api/openstack/compute/extensions.py b/nova/api/openstack/compute/extensions.py
index 1d0738417..4e49482f5 100644
--- a/nova/api/openstack/compute/extensions.py
+++ b/nova/api/openstack/compute/extensions.py
@@ -15,8 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
+
from nova.api.openstack import extensions as base_extensions
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py
index 7dda64f87..703d2fe2d 100644
--- a/nova/api/openstack/compute/images.py
+++ b/nova/api/openstack/compute/images.py
@@ -144,6 +144,7 @@ class Controller(wsgi.Controller):
explanation = _("Image not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
+ req.cache_db_items('images', [image], 'id')
return self._view_builder.show(req, image)
def delete(self, req, id):
@@ -200,6 +201,7 @@ class Controller(wsgi.Controller):
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=str(e))
+ req.cache_db_items('images', images, 'id')
return self._view_builder.detail(req, images)
def create(self, *args, **kwargs):
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index c10c6e1b3..cc33537af 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -19,9 +19,9 @@ import os
import re
import socket
+from oslo.config import cfg
import webob
from webob import exc
-from xml.dom import minidom
from nova.api.openstack import common
from nova.api.openstack.compute import ips
@@ -31,7 +31,6 @@ from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import instance_types
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
@@ -312,7 +311,7 @@ class ActionDeserializer(CommonDeserializer):
"""
def default(self, string):
- dom = minidom.parseString(string)
+ dom = utils.safe_minidom_parse_string(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
@@ -419,7 +418,7 @@ class CreateDeserializer(CommonDeserializer):
def default(self, string):
"""Deserialize an xml-formatted server create request."""
- dom = minidom.parseString(string)
+ dom = utils.safe_minidom_parse_string(string)
server = self._extract_server(dom)
return {'body': {'server': server}}
@@ -629,7 +628,7 @@ class Controller(wsgi.Controller):
if port_id:
network_uuid = None
if not self._is_quantum_v2():
- # port parameter is only for qunatum v2.0
+ # port parameter is only for quantum v2.0
msg = _("Unknown argment : port")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(port_id):
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index f68eff2a7..8c77f1c9c 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -27,6 +27,7 @@ import webob
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova import utils
from nova import wsgi
@@ -217,7 +218,7 @@ class XMLDeserializer(TextDeserializer):
plurals = set(self.metadata.get('plurals', {}))
try:
- node = minidom.parseString(datastring).childNodes[0]
+ node = utils.safe_minidom_parse_string(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
@@ -268,11 +269,11 @@ class XMLDeserializer(TextDeserializer):
def extract_text(self, node):
"""Get the text field contained by the given node."""
- if len(node.childNodes) == 1:
- child = node.childNodes[0]
+ ret_val = ""
+ for child in node.childNodes:
if child.nodeType == child.TEXT_NODE:
- return child.nodeValue
- return ""
+ ret_val += child.nodeValue
+ return ret_val
def extract_elements(self, node):
"""Get only Element type childs from node."""
@@ -633,7 +634,7 @@ def action_peek_json(body):
def action_peek_xml(body):
"""Determine action to invoke."""
- dom = minidom.parseString(body)
+ dom = utils.safe_minidom_parse_string(body)
action_node = dom.childNodes[0]
return action_node.tagName
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 77ab4415c..67d459583 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -18,10 +18,10 @@ Request Body limiting middleware.
"""
+from oslo.config import cfg
import webob.dec
import webob.exc
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import wsgi
diff --git a/nova/api/validator.py b/nova/api/validator.py
index ddcc3ed2a..9304387fd 100644
--- a/nova/api/validator.py
+++ b/nova/api/validator.py
@@ -17,10 +17,12 @@
# under the License.
import base64
-import logging
import re
import socket
+from nova.openstack.common import log as logging
+
+
LOG = logging.getLogger(__name__)
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
index 8c9d4acf7..61486517d 100644
--- a/nova/availability_zones.py
+++ b/nova/availability_zones.py
@@ -15,8 +15,9 @@
"""Availability zone helper functions."""
+from oslo.config import cfg
+
from nova import db
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
availability_zone_opts = [
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index c07a23ebb..55b3dadf7 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -19,13 +19,14 @@ Cells Service Manager
import datetime
import time
+from oslo.config import cfg
+
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 5c7247085..50a673464 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -25,6 +25,7 @@ The interface into this module is the MessageRunner class.
import sys
from eventlet import queue
+from oslo.config import cfg
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
@@ -32,7 +33,6 @@ from nova import compute
from nova import context
from nova.db import base
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
diff --git a/nova/cells/opts.py b/nova/cells/opts.py
index 45b453ebc..b66dd6b42 100644
--- a/nova/cells/opts.py
+++ b/nova/cells/opts.py
@@ -18,7 +18,7 @@
Global cells config options
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
cells_opts = [
cfg.BoolOpt('enable',
diff --git a/nova/cells/rpc_driver.py b/nova/cells/rpc_driver.py
index 5e420aa8e..0dcf1184c 100644
--- a/nova/cells/rpc_driver.py
+++ b/nova/cells/rpc_driver.py
@@ -16,8 +16,9 @@
"""
Cells RPC Communication Driver
"""
+from oslo.config import cfg
+
from nova.cells import driver
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova.openstack.common.rpc import proxy as rpc_proxy
diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py
index e7db2582f..910c4ab9d 100644
--- a/nova/cells/rpcapi.py
+++ b/nova/cells/rpcapi.py
@@ -22,7 +22,8 @@ services. That communication is handled by the cells driver via the
messging module.
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import proxy as rpc_proxy
diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py
index 0b730290a..3b69b2eac 100644
--- a/nova/cells/scheduler.py
+++ b/nova/cells/scheduler.py
@@ -19,11 +19,12 @@ Cells Scheduler
import random
import time
+from oslo.config import cfg
+
from nova import compute
from nova.compute import vm_states
from nova.db import base
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler import rpcapi as scheduler_rpcapi
@@ -55,7 +56,8 @@ class CellsScheduler(base.Base):
def _create_instances_here(self, ctxt, request_spec):
instance_values = request_spec['instance_properties']
- for instance_uuid in request_spec['instance_uuids']:
+ num_instances = len(request_spec['instance_uuids'])
+ for i, instance_uuid in enumerate(request_spec['instance_uuids']):
instance_values['uuid'] = instance_uuid
instance = self.compute_api.create_db_entry_for_new_instance(
ctxt,
@@ -63,7 +65,9 @@ class CellsScheduler(base.Base):
request_spec['image'],
instance_values,
request_spec['security_group'],
- request_spec['block_device_mapping'])
+ request_spec['block_device_mapping'],
+ num_instances, i)
+
self.msg_runner.instance_update_at_top(ctxt, instance)
def _get_possible_cells(self):
diff --git a/nova/cells/state.py b/nova/cells/state.py
index e3886bedb..53c536888 100644
--- a/nova/cells/state.py
+++ b/nova/cells/state.py
@@ -20,10 +20,11 @@ import copy
import datetime
import functools
+from oslo.config import cfg
+
from nova.cells import rpc_driver
from nova import context
from nova.db import base
-from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/cert/rpcapi.py b/nova/cert/rpcapi.py
index fdaa327cb..7dd843813 100644
--- a/nova/cert/rpcapi.py
+++ b/nova/cert/rpcapi.py
@@ -18,7 +18,8 @@
Client side of the cert manager RPC API.
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
import nova.openstack.common.rpc.proxy
rpcapi_opts = [
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index cb7fb8bac..c395a754c 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -26,12 +26,13 @@ import os
import string
import zipfile
+from oslo.config import cfg
+
from nova import compute
from nova.compute import instance_types
from nova import crypto
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import paths
diff --git a/nova/common/memorycache.py b/nova/common/memorycache.py
index 86057b6ae..f89e4b265 100644
--- a/nova/common/memorycache.py
+++ b/nova/common/memorycache.py
@@ -18,7 +18,8 @@
"""Super simple fake memcache client."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import timeutils
memcache_opts = [
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index d1f8cc16c..f1451aab3 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -16,24 +16,25 @@
# License for the specific language governing permissions and limitations
# under the License.
+import oslo.config.cfg
+
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.compute import <foo>' elsewhere.
-import nova.openstack.common.cfg
import nova.openstack.common.importutils
_compute_opts = [
- nova.openstack.common.cfg.StrOpt('compute_api_class',
- default='nova.compute.api.API',
- help='The full class name of the '
- 'compute API class to use'),
+ oslo.config.cfg.StrOpt('compute_api_class',
+ default='nova.compute.api.API',
+ help='The full class name of the '
+ 'compute API class to use'),
]
-nova.openstack.common.cfg.CONF.register_opts(_compute_opts)
+oslo.config.cfg.CONF.register_opts(_compute_opts)
def API(*args, **kwargs):
importutils = nova.openstack.common.importutils
- class_name = nova.openstack.common.cfg.CONF.compute_api_class
+ class_name = oslo.config.cfg.CONF.compute_api_class
return importutils.import_object(class_name, *args, **kwargs)
@@ -43,7 +44,7 @@ def HostAPI(*args, **kwargs):
api
"""
importutils = nova.openstack.common.importutils
- compute_api_class_name = nova.openstack.common.cfg.CONF.compute_api_class
+ compute_api_class_name = oslo.config.cfg.CONF.compute_api_class
compute_api_class = importutils.import_class(compute_api_class_name)
class_name = compute_api_class.__module__ + ".HostAPI"
return importutils.import_object(class_name, *args, **kwargs)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 5e160d2ef..cc07a998a 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -3,7 +3,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
-# Copyright 2012 Red Hat, Inc.
+# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -26,9 +26,10 @@ import functools
import re
import string
import time
-import urllib
import uuid
+from oslo.config import cfg
+
from nova import availability_zones
from nova import block_device
from nova.compute import instance_actions
@@ -45,10 +46,10 @@ from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
+from nova.network.security_group import openstack_driver
+from nova.network.security_group import security_group_base
from nova import notifications
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
-from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -60,7 +61,6 @@ from nova import servicegroup
from nova import utils
from nova import volume
-
LOG = logging.getLogger(__name__)
compute_opts = [
@@ -80,12 +80,16 @@ compute_opts = [
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
- cfg.StrOpt('security_group_handler',
- default='nova.network.sg.NullSecurityGroupHandler',
- help='The full class name of the security group handler class'),
- cfg.StrOpt('security_group_api',
- default='nova.compute.api.SecurityGroupAPI',
- help='The full class name of the security API class'),
+ cfg.StrOpt('multi_instance_display_name_template',
+ default='%(name)s-%(uuid)s',
+ help='When creating multiple instances with a single request '
+ 'using the os-multiple-create API extension, this '
+ 'template will be used to build the display name for '
+ 'each instance. The benefit is that the instances '
+ 'end up with different hostnames. To restore legacy '
+ 'behavior of every instance having the same name, set '
+ 'this option to "%(name)s". Valid keys for the '
+ 'template are: name, uuid, count.'),
]
@@ -180,9 +184,8 @@ class API(base.Base):
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
- importutils.import_object(
- CONF.security_group_api))
- self.sgh = importutils.import_object(CONF.security_group_handler)
+ openstack_driver.get_openstack_security_group_driver())
+ self.sgh = openstack_driver.get_security_group_handler()
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -419,6 +422,26 @@ class API(base.Base):
options_from_image['auto_disk_config'] = auto_disk_config
return options_from_image
+ def _apply_instance_name_template(self, context, instance, index):
+ params = {
+ 'uuid': instance['uuid'],
+ 'name': instance['display_name'],
+ 'count': index + 1,
+ }
+ try:
+ new_name = (CONF.multi_instance_display_name_template %
+ params)
+ except (KeyError, TypeError):
+ LOG.exception(_('Failed to set instance name using '
+ 'multi_instance_display_name_template.'))
+ new_name = instance['display_name']
+ updates = {'display_name': new_name}
+ if not instance.get('hostname'):
+ updates['hostname'] = utils.sanitize_hostname(new_name)
+ instance = self.db.instance_update(context,
+ instance['uuid'], updates)
+ return instance
+
def _validate_and_provision_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -573,7 +596,8 @@ class API(base.Base):
options = base_options.copy()
instance = self.create_db_entry_for_new_instance(
context, instance_type, image, options,
- security_group, block_device_mapping)
+ security_group, block_device_mapping, num_instances, i)
+
instances.append(instance)
instance_uuids.append(instance['uuid'])
self._validate_bdm(context, instance)
@@ -777,7 +801,7 @@ class API(base.Base):
image_properties.get('block_device_mapping')):
instance['shutdown_terminate'] = False
- def _populate_instance_names(self, instance):
+ def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
hostname = instance.get('hostname')
@@ -785,9 +809,17 @@ class API(base.Base):
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance['display_name'] = display_name
- if hostname is None:
+
+ if hostname is None and num_instances == 1:
+ # NOTE(russellb) In the multi-instance case, we're going to
+ # overwrite the display_name using the
+ # multi_instance_display_name_template. We need the default
+ # display_name set so that it can be used in the template, though.
+ # Only set the hostname here if we're only creating one instance.
+ # Otherwise, it will be built after the template based
+ # display_name.
hostname = display_name
- instance['hostname'] = utils.sanitize_hostname(hostname)
+ instance['hostname'] = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
@@ -824,20 +856,16 @@ class API(base.Base):
base_image_ref = base_options['image_ref']
instance['system_metadata']['image_base_image_ref'] = base_image_ref
-
- # Use 'default' security_group if none specified.
- if security_groups is None:
- security_groups = ['default']
- elif not isinstance(security_groups, list):
- security_groups = [security_groups]
- instance['security_groups'] = security_groups
+ self.security_group_api.populate_security_groups(instance,
+ security_groups)
return instance
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
- base_options, security_group, block_device_mapping):
+ base_options, security_group, block_device_mapping, num_instances,
+ index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
@@ -848,7 +876,7 @@ class API(base.Base):
instance = self._populate_instance_for_create(base_options,
image, security_group)
- self._populate_instance_names(instance)
+ self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
@@ -859,6 +887,13 @@ class API(base.Base):
self.security_group_api.ensure_default(context)
instance = self.db.instance_create(context, instance)
+ if num_instances > 1:
+ # NOTE(russellb) We wait until this spot to handle
+ # multi_instance_display_name_template, because we need
+ # the UUID from the instance.
+ instance = self._apply_instance_name_template(context, instance,
+ index)
+
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
@@ -1981,7 +2016,8 @@ class API(base.Base):
request_spec = {
'instance_type': new_instance_type,
'instance_uuids': [instance['uuid']],
- 'instance_properties': instance}
+ 'instance_properties': instance,
+ 'image': image}
filter_properties = {'ignore_hosts': []}
@@ -2284,6 +2320,20 @@ class API(base.Base):
self._detach_volume(context, instance, volume_id)
@wrap_check_policy
+ def attach_interface(self, context, instance, network_id, port_id,
+ requested_ip):
+ """Use hotplug to add a network adapter to an instance."""
+ return self.compute_rpcapi.attach_interface(context,
+ instance=instance, network_id=network_id, port_id=port_id,
+ requested_ip=requested_ip)
+
+ @wrap_check_policy
+ def detach_interface(self, context, instance, port_id):
+ """Detach a network adapter from an instance."""
+ self.compute_rpcapi.detach_interface(context, instance=instance,
+ port_id=port_id)
+
+ @wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
@@ -2367,7 +2417,7 @@ class API(base.Base):
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
- host_name, instance=instance)
+ host_name or "another host", instance=instance)
instance = self.update(context, instance,
task_state=task_states.MIGRATING,
@@ -2692,15 +2742,19 @@ class KeypairAPI(base.Base):
'fingerprint': key_pair['fingerprint']}
-class SecurityGroupAPI(base.Base):
+class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""
Sub-set of the Compute API related to managing security groups
and security group rules
"""
+
+ # The nova security group api does not use a uuid for the id.
+ id_is_uuid = False
+
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
- self.sgh = importutils.import_object(CONF.security_group_handler)
+ self.sgh = openstack_driver.get_security_group_handler()
def validate_property(self, value, property, allowed):
"""
@@ -2747,7 +2801,7 @@ class SecurityGroupAPI(base.Base):
if not existed:
self.sgh.trigger_security_group_create_refresh(context, group)
- def create(self, context, name, description):
+ def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
@@ -2926,164 +2980,15 @@ class SecurityGroupAPI(base.Base):
self.trigger_handler('instance_remove_security_group',
context, instance, security_group_name)
- def trigger_handler(self, event, *args):
- handle = getattr(self.sgh, 'trigger_%s_refresh' % event)
- handle(*args)
-
- def trigger_rules_refresh(self, context, id):
- """Called when a rule is added to or removed from a security_group."""
-
- security_group = self.db.security_group_get(context, id)
-
- for instance in security_group['instances']:
- if instance['host'] is not None:
- self.security_group_rpcapi.refresh_instance_security_rules(
- context, instance['host'], instance)
-
- def trigger_members_refresh(self, context, group_ids):
- """Called when a security group gains a new or loses a member.
-
- Sends an update request to each compute node for each instance for
- which this is relevant.
- """
- # First, we get the security group rules that reference these groups as
- # the grantee..
- security_group_rules = set()
- for group_id in group_ids:
- security_group_rules.update(
- self.db.security_group_rule_get_by_security_group_grantee(
- context,
- group_id))
-
- # ..then we distill the rules into the groups to which they belong..
- security_groups = set()
- for rule in security_group_rules:
- security_group = self.db.security_group_get(
- context,
- rule['parent_group_id'])
- security_groups.add(security_group)
-
- # ..then we find the instances that are members of these groups..
- instances = {}
- for security_group in security_groups:
- for instance in security_group['instances']:
- if instance['uuid'] not in instances:
- instances[instance['uuid']] = instance
-
- # ..then we send a request to refresh the rules for each instance.
- for instance in instances.values():
- if instance['host']:
- self.security_group_rpcapi.refresh_instance_security_rules(
- context, instance['host'], instance)
-
- def parse_cidr(self, cidr):
- if cidr:
- try:
- cidr = urllib.unquote(cidr).decode()
- except Exception as e:
- self.raise_invalid_cidr(cidr, e)
-
- if not utils.is_valid_cidr(cidr):
- self.raise_invalid_cidr(cidr)
-
- return cidr
- else:
- return '0.0.0.0/0'
-
- @staticmethod
- def new_group_ingress_rule(grantee_group_id, protocol, from_port,
- to_port):
- return SecurityGroupAPI._new_ingress_rule(protocol, from_port,
- to_port, group_id=grantee_group_id)
-
- @staticmethod
- def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port):
- return SecurityGroupAPI._new_ingress_rule(protocol, from_port,
- to_port, cidr=grantee_cidr)
-
- @staticmethod
- def _new_ingress_rule(ip_protocol, from_port, to_port,
- group_id=None, cidr=None):
- values = {}
-
- if group_id:
- values['group_id'] = group_id
- # Open everything if an explicit port range or type/code are not
- # specified, but only if a source group was specified.
- ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
- if (ip_proto_upper == 'ICMP' and
- from_port is None and to_port is None):
- from_port = -1
- to_port = -1
- elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
- and to_port is None):
- from_port = 1
- to_port = 65535
-
- elif cidr:
- values['cidr'] = cidr
-
- if ip_protocol and from_port is not None and to_port is not None:
-
- ip_protocol = str(ip_protocol)
- try:
- # Verify integer conversions
- from_port = int(from_port)
- to_port = int(to_port)
- except ValueError:
- if ip_protocol.upper() == 'ICMP':
- raise exception.InvalidInput(reason="Type and"
- " Code must be integers for ICMP protocol type")
- else:
- raise exception.InvalidInput(reason="To and From ports "
- "must be integers")
-
- if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
- raise exception.InvalidIpProtocol(protocol=ip_protocol)
-
- # Verify that from_port must always be less than
- # or equal to to_port
- if (ip_protocol.upper() in ['TCP', 'UDP'] and
- (from_port > to_port)):
- raise exception.InvalidPortRange(from_port=from_port,
- to_port=to_port, msg="Former value cannot"
- " be greater than the later")
-
- # Verify valid TCP, UDP port ranges
- if (ip_protocol.upper() in ['TCP', 'UDP'] and
- (from_port < 1 or to_port > 65535)):
- raise exception.InvalidPortRange(from_port=from_port,
- to_port=to_port, msg="Valid TCP ports should"
- " be between 1-65535")
-
- # Verify ICMP type and code
- if (ip_protocol.upper() == "ICMP" and
- (from_port < -1 or from_port > 255 or
- to_port < -1 or to_port > 255)):
- raise exception.InvalidPortRange(from_port=from_port,
- to_port=to_port, msg="For ICMP, the"
- " type:code must be valid")
-
- values['protocol'] = ip_protocol
- values['from_port'] = from_port
- values['to_port'] = to_port
-
- else:
- # If cidr based filtering, protocol and ports are mandatory
- if cidr:
- return None
-
- return values
-
- def rule_exists(self, security_group, values):
- """Indicates whether the specified rule values are already
+ def rule_exists(self, security_group, new_rule):
+ """Indicates whether the specified rule is already
defined in the given security group.
"""
for rule in security_group['rules']:
is_duplicate = True
keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
- if rule.get(key) != values.get(key):
+ if rule.get(key) != new_rule.get(key):
is_duplicate = False
break
if is_duplicate:
@@ -3099,6 +3004,13 @@ class SecurityGroupAPI(base.Base):
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
+ """Add security group rule(s) to security group.
+
+ Note: the Nova security group API doesn't support adding multiple
+ security group rules at once but the EC2 one does. Therefore,
+ this function is written to support both.
+ """
+
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
@@ -3128,26 +3040,122 @@ class SecurityGroupAPI(base.Base):
self.trigger_rules_refresh(context, id=security_group['id'])
self.trigger_handler('security_group_rule_destroy', context, rule_ids)
- @staticmethod
- def raise_invalid_property(msg):
- raise NotImplementedError()
+ def remove_default_rules(self, context, rule_ids):
+ for rule_id in rule_ids:
+ self.db.security_group_default_rule_destroy(context, rule_id)
- @staticmethod
- def raise_group_already_exists(msg):
- raise NotImplementedError()
+ def add_default_rules(self, context, vals):
+ rules = [self.db.security_group_default_rule_create(context, v)
+ for v in vals]
+ return rules
- @staticmethod
- def raise_invalid_group(msg):
- raise NotImplementedError()
+ def default_rule_exists(self, context, values):
+ """Indicates whether the specified rule values are already
+ defined in the default security group rules.
+ """
+ for rule in self.db.security_group_default_rule_list(context):
+ is_duplicate = True
+ keys = ('cidr', 'from_port', 'to_port', 'protocol')
+ for key in keys:
+ if rule.get(key) != values.get(key):
+ is_duplicate = False
+ break
+ if is_duplicate:
+ return rule.get('id') or True
+ return False
- @staticmethod
- def raise_invalid_cidr(cidr, decoding_exception=None):
- raise NotImplementedError()
+ def get_all_default_rules(self, context):
+ try:
+ rules = self.db.security_group_default_rule_list(context)
+ except Exception:
+ msg = 'cannot get default security group rules'
+ raise exception.SecurityGroupDefaultRuleNotFound(msg)
- @staticmethod
- def raise_over_quota(msg):
- raise NotImplementedError()
+ return rules
- @staticmethod
- def raise_not_found(msg):
- raise NotImplementedError()
+ def get_default_rule(self, context, id):
+ try:
+ return self.db.security_group_default_rule_get(context, id)
+ except exception.NotFound:
+ msg = _("Rule (%s) not found") % id
+ self.raise_not_found(msg)
+
+ def validate_id(self, id):
+ try:
+ return int(id)
+ except ValueError:
+ msg = _("Security group id should be integer")
+ self.raise_invalid_property(msg)
+
+ def create_security_group_rule(self, context, security_group, new_rule):
+ if self.rule_exists(security_group, new_rule):
+ msg = (_('This rule already exists in group %s') %
+ new_rule['parent_group_id'])
+ self.raise_group_already_exists(msg)
+ return self.add_rules(context, new_rule['parent_group_id'],
+ security_group['name'],
+ [new_rule])[0]
+
+ def trigger_handler(self, event, *args):
+ handle = getattr(self.sgh, 'trigger_%s_refresh' % event)
+ handle(*args)
+
+ def trigger_rules_refresh(self, context, id):
+ """Called when a rule is added to or removed from a security_group."""
+
+ security_group = self.db.security_group_get(context, id)
+
+ for instance in security_group['instances']:
+ if instance['host'] is not None:
+ self.security_group_rpcapi.refresh_instance_security_rules(
+ context, instance['host'], instance)
+
+ def trigger_members_refresh(self, context, group_ids):
+ """Called when a security group gains a new or loses a member.
+
+ Sends an update request to each compute node for each instance for
+ which this is relevant.
+ """
+ # First, we get the security group rules that reference these groups as
+ # the grantee..
+ security_group_rules = set()
+ for group_id in group_ids:
+ security_group_rules.update(
+ self.db.security_group_rule_get_by_security_group_grantee(
+ context,
+ group_id))
+
+ # ..then we distill the rules into the groups to which they belong..
+ security_groups = set()
+ for rule in security_group_rules:
+ security_group = self.db.security_group_get(
+ context,
+ rule['parent_group_id'])
+ security_groups.add(security_group)
+
+ # ..then we find the instances that are members of these groups..
+ instances = {}
+ for security_group in security_groups:
+ for instance in security_group['instances']:
+ if instance['uuid'] not in instances:
+ instances[instance['uuid']] = instance
+
+ # ..then we send a request to refresh the rules for each instance.
+ for instance in instances.values():
+ if instance['host']:
+ self.security_group_rpcapi.refresh_instance_security_rules(
+ context, instance['host'], instance)
+
+ def get_instance_security_groups(self, req, instance_id):
+ instance = req.get_db_instance(instance_id)
+ groups = instance.get('security_groups')
+ if groups:
+ return [{'name': group['name']} for group in groups]
+
+ def populate_security_groups(self, instance, security_groups):
+ # Use 'default' security_group if none specified.
+ if security_groups is None:
+ security_groups = ['default']
+ elif not isinstance(security_groups, list):
+ security_groups = [security_groups]
+ instance['security_groups'] = security_groups
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 4cc5d5d4e..73105b33f 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -23,10 +23,11 @@
import re
import uuid
+from oslo.config import cfg
+
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 1b545495c..afeb9f02e 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -37,6 +37,7 @@ import traceback
import uuid
from eventlet import greenthread
+from oslo.config import cfg
from nova import block_device
from nova.cloudpipe import pipelib
@@ -56,7 +57,7 @@ from nova.image import glance
from nova import manager
from nova import network
from nova.network import model as network_model
-from nova.openstack.common import cfg
+from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
@@ -69,6 +70,7 @@ from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import utils
from nova.virt import driver
+from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova import volume
@@ -315,7 +317,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.24'
+ RPC_API_VERSION = '2.25'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -331,7 +333,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.conductor_api = conductor.API()
-
+ self.is_quantum_security_groups = (
+ openstack_driver.is_quantum_security_groups())
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
@@ -472,7 +475,7 @@ class ComputeManager(manager.SchedulerDependentManager):
block_device_info = \
self._get_instance_volume_block_device_info(
- context, instance['uuid'])
+ context, instance)
try:
self.driver.resume_state_on_host_boot(
@@ -499,6 +502,40 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.warning(_('Hypervisor driver does not support '
'firewall rules'), instance=instance)
+ def handle_lifecycle_event(self, event):
+ LOG.info(_("Lifecycle event %(state)d on VM %(uuid)s") %
+ {'state': event.get_transition(),
+ 'uuid': event.get_instance_uuid()})
+ context = nova.context.get_admin_context()
+ instance = self.conductor_api.instance_get_by_uuid(
+ context, event.get_instance_uuid())
+ vm_power_state = None
+ if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
+ vm_power_state = power_state.SHUTDOWN
+ elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
+ vm_power_state = power_state.RUNNING
+ elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
+ vm_power_state = power_state.PAUSED
+ elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
+ vm_power_state = power_state.RUNNING
+ else:
+ LOG.warning(_("Unexpected power state %d") %
+ event.get_transition())
+
+ if vm_power_state is not None:
+ self._sync_instance_power_state(context,
+ instance,
+ vm_power_state)
+
+ def handle_events(self, event):
+ if isinstance(event, virtevent.LifecycleEvent):
+ self.handle_lifecycle_event(event)
+ else:
+ LOG.debug(_("Ignoring event %s") % event)
+
+ def init_virt_events(self):
+ self.driver.register_event_listener(self.handle_events)
+
def init_host(self):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
@@ -509,6 +546,8 @@ class ComputeManager(manager.SchedulerDependentManager):
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
+ self.init_virt_events()
+
try:
# checking that instance was not already evacuated to other host
self._destroy_evacuated_instances(context)
@@ -579,8 +618,13 @@ class ComputeManager(manager.SchedulerDependentManager):
Passes straight through to the virtualization driver.
+ Synchronise the call because we may still be in the middle of
+ creating the instance.
"""
- return self.driver.refresh_instance_security_rules(instance)
+ @lockutils.synchronized(instance['uuid'], 'nova-')
+ def _sync_refresh():
+ return self.driver.refresh_instance_security_rules(instance)
+ return _sync_refresh()
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_provider_fw_rules(self, context):
@@ -674,6 +718,13 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Launch a new instance with specified options."""
context = context.elevated()
+ # If quantum security groups pass requested security
+ # groups to allocate_for_instance()
+ if request_spec and self.is_quantum_security_groups:
+ security_groups = request_spec.get('security_group')
+ else:
+ security_groups = []
+
try:
self._check_instance_exists(context, instance)
image_meta = self._check_image_size(context, instance)
@@ -705,7 +756,7 @@ class ComputeManager(manager.SchedulerDependentManager):
macs = self.driver.macs_for_instance(instance)
network_info = self._allocate_network(context, instance,
- requested_networks, macs)
+ requested_networks, macs, security_groups)
self._instance_update(
context, instance['uuid'],
@@ -940,7 +991,8 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state=(task_states.SCHEDULING,
None))
- def _allocate_network(self, context, instance, requested_networks, macs):
+ def _allocate_network(self, context, instance, requested_networks, macs,
+ security_groups):
"""Allocate networks for an instance and return the network info."""
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
@@ -952,7 +1004,9 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
- macs=macs, conductor_api=self.conductor_api)
+ macs=macs,
+ conductor_api=self.conductor_api,
+ security_groups=security_groups)
except Exception:
LOG.exception(_('Instance failed network setup'),
instance=instance)
@@ -2686,6 +2740,39 @@ class ComputeManager(manager.SchedulerDependentManager):
except exception.NotFound:
pass
+ def attach_interface(self, context, instance, network_id, port_id,
+ requested_ip=None):
+ """Use hotplug to add an network adapter to an instance."""
+ network_info = self.network_api.allocate_port_for_instance(
+ context, instance, port_id, network_id, requested_ip,
+ self.conductor_api)
+ image_meta = _get_image_meta(context, instance['image_ref'])
+ legacy_net_info = self._legacy_nw_info(network_info)
+ for (network, mapping) in legacy_net_info:
+ if mapping['vif_uuid'] == port_id:
+ self.driver.attach_interface(instance, image_meta,
+ [(network, mapping)])
+ return (network, mapping)
+
+ def detach_interface(self, context, instance, port_id):
+ """Detach an network adapter from an instance."""
+ network_info = self.network_api.get_instance_nw_info(
+ context.elevated(), instance, conductor_api=self.conductor_api)
+ legacy_nwinfo = self._legacy_nw_info(network_info)
+ condemned = None
+ for (network, mapping) in legacy_nwinfo:
+ if mapping['vif_uuid'] == port_id:
+ condemned = (network, mapping)
+ break
+ if condemned is None:
+ raise exception.PortNotFound(_("Port %(port_id)s is not "
+ "attached") % locals())
+
+ self.network_api.deallocate_port_for_instance(context, instance,
+ port_id,
+ self.conductor_api)
+ self.driver.detach_interface(instance, [condemned])
+
def _get_compute_info(self, context, host):
compute_node_ref = self.conductor_api.service_get_by_compute_host(
context, host)
@@ -3351,7 +3438,7 @@ class ComputeManager(manager.SchedulerDependentManager):
capability['host_ip'] = CONF.my_ip
self.update_service_capabilities(capabilities)
- @manager.periodic_task(spacing=600.0)
+ @manager.periodic_task(spacing=600.0, run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 825422e86..efb95dd7b 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -19,6 +19,8 @@ scheduler with useful information about availability through the ComputeNode
model.
"""
+from oslo.config import cfg
+
from nova.compute import claims
from nova.compute import instance_types
from nova.compute import task_states
@@ -26,7 +28,6 @@ from nova.compute import vm_states
from nova import conductor
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 525d1adc7..0be9972da 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -18,8 +18,9 @@
Client side of the compute RPC API.
"""
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
import nova.openstack.common.rpc.proxy
@@ -159,6 +160,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
rebuild_instance()
2.23 - Remove network_info from reboot_instance
2.24 - Added get_spice_console method
+ 2.25 - Add attach_interface() and detach_interface()
'''
#
@@ -200,6 +202,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p, network_id=network_id),
topic=_compute_topic(self.topic, ctxt, None, instance))
+ def attach_interface(self, ctxt, instance, network_id, port_id,
+ requested_ip):
+ instance_p = jsonutils.to_primitive(instance)
+ return self.call(ctxt, self.make_msg('attach_interface',
+ instance=instance_p, network_id=network_id,
+ port_id=port_id, requested_ip=requested_ip),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='2.25')
+
def attach_volume(self, ctxt, instance, volume_id, mountpoint):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('attach_volume',
@@ -243,6 +254,13 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, host, instance),
version='2.7')
+ def detach_interface(self, ctxt, instance, port_id):
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('detach_interface',
+ instance=instance_p, port_id=port_id),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='2.25')
+
def detach_volume(self, ctxt, instance, volume_id):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('detach_volume',
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 35139838d..8c1f1d577 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -20,12 +20,13 @@ import re
import string
import traceback
+from oslo.config import cfg
+
from nova import block_device
from nova.compute import instance_types
from nova import exception
from nova.network import model as network_model
from nova import notifications
-from nova.openstack.common import cfg
from nova.openstack.common import log
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common import timeutils
diff --git a/nova/conductor/__init__.py b/nova/conductor/__init__.py
index 4c702d037..b192ca7a7 100644
--- a/nova/conductor/__init__.py
+++ b/nova/conductor/__init__.py
@@ -12,14 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+import oslo.config.cfg
+
from nova.conductor import api as conductor_api
-import nova.openstack.common.cfg
-import nova.openstack.common.importutils
def API(*args, **kwargs):
use_local = kwargs.pop('use_local', False)
- if nova.openstack.common.cfg.CONF.conductor.use_local or use_local:
+ if oslo.config.cfg.CONF.conductor.use_local or use_local:
api = conductor_api.LocalAPI
else:
api = conductor_api.API
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 9026eb8a2..6bc2d3f25 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -14,10 +14,11 @@
"""Handles all requests to the conductor service."""
+from oslo.config import cfg
+
from nova.conductor import manager
from nova.conductor import rpcapi
from nova import exception as exc
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova import utils
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index a986b0415..1c54b1f0b 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -20,6 +20,7 @@ from nova.compute import utils as compute_utils
from nova import exception
from nova import manager
from nova import network
+from nova.network.security_group import openstack_driver
from nova import notifications
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -53,7 +54,8 @@ class ConductorManager(manager.SchedulerDependentManager):
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
- self.security_group_api = compute_api.SecurityGroupAPI()
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
self._network_api = None
self._compute_api = None
self.quotas = quota.QUOTAS
@@ -208,9 +210,9 @@ class ConductorManager(manager.SchedulerDependentManager):
return jsonutils.to_primitive(group)
def security_group_rule_get_by_security_group(self, context, secgroup):
- rule = self.db.security_group_rule_get_by_security_group(
+ rules = self.db.security_group_rule_get_by_security_group(
context, secgroup['id'])
- return jsonutils.to_primitive(rule)
+ return jsonutils.to_primitive(rules, max_depth=4)
def provider_fw_rule_get_all(self, context):
rules = self.db.provider_fw_rule_get_all(context)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index b82f2b8e1..f7b7a7814 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -14,7 +14,8 @@
"""Client side of the conductor RPC API."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc.proxy
diff --git a/nova/config.py b/nova/config.py
index 18147bdbb..ff6681b44 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -17,7 +17,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import rpc
from nova import paths
diff --git a/nova/console/api.py b/nova/console/api.py
index 57c5cb0e3..a2bc40ae0 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -17,10 +17,11 @@
"""Handles ConsoleProxy API requests."""
+from oslo.config import cfg
+
from nova.compute import rpcapi as compute_rpcapi
from nova.console import rpcapi as console_rpcapi
from nova.db import base
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common import uuidutils
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 2045f824d..77be2ad4d 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -19,10 +19,11 @@
import socket
+from oslo.config import cfg
+
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova import manager
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/console/rpcapi.py b/nova/console/rpcapi.py
index 5a91a81cb..78c0c9c1a 100644
--- a/nova/console/rpcapi.py
+++ b/nova/console/rpcapi.py
@@ -18,7 +18,8 @@
Client side of the console RPC API.
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
import nova.openstack.common.rpc.proxy
rpcapi_opts = [
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
index fcd733cf9..d7c66a00d 100644
--- a/nova/console/vmrc.py
+++ b/nova/console/vmrc.py
@@ -19,8 +19,9 @@
import base64
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.virt.vmwareapi import vim_util
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index bb1818943..f1ea41796 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -17,10 +17,11 @@
"""VMRC Console Manager."""
+from oslo.config import cfg
+
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
from nova import manager
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import driver as vmwareapi_conn
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index ce2eb5350..c090e84d4 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -21,11 +21,11 @@ import os
import signal
from Cheetah import Template
+from oslo.config import cfg
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import paths
from nova import utils
diff --git a/nova/consoleauth/__init__.py b/nova/consoleauth/__init__.py
index c533043f3..3100ee15c 100644
--- a/nova/consoleauth/__init__.py
+++ b/nova/consoleauth/__init__.py
@@ -18,7 +18,7 @@
"""Module to authenticate Consoles."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic',
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index 2dfc72435..74321a27b 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -20,9 +20,10 @@
import time
+from oslo.config import cfg
+
from nova.common import memorycache
from nova import manager
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
diff --git a/nova/consoleauth/rpcapi.py b/nova/consoleauth/rpcapi.py
index 64b915ec3..813143f76 100644
--- a/nova/consoleauth/rpcapi.py
+++ b/nova/consoleauth/rpcapi.py
@@ -18,7 +18,8 @@
Client side of the consoleauth RPC API.
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
import nova.openstack.common.rpc.proxy
CONF = cfg.CONF
diff --git a/nova/context.py b/nova/context.py
index 60fd5b4c0..831a91b11 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -22,6 +22,7 @@
import copy
import uuid
+from nova import exception
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -166,3 +167,55 @@ def get_admin_context(read_deleted="no"):
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
+
+
+def is_user_context(context):
+ """Indicates if the request context is a normal user."""
+ if not context:
+ return False
+ if context.is_admin:
+ return False
+ if not context.user_id or not context.project_id:
+ return False
+ return True
+
+
+def require_admin_context(ctxt):
+ """Raise exception.AdminRequired() if context is an admin context."""
+ if not ctxt.is_admin:
+ raise exception.AdminRequired()
+
+
+def require_context(ctxt):
+ """Raise exception.NotAuthorized() if context is not a user or an
+ admin context.
+ """
+ if not ctxt.is_admin and not is_user_context(ctxt):
+ raise exception.NotAuthorized()
+
+
+def authorize_project_context(context, project_id):
+ """Ensures a request has permission to access the given project."""
+ if is_user_context(context):
+ if not context.project_id:
+ raise exception.NotAuthorized()
+ elif context.project_id != project_id:
+ raise exception.NotAuthorized()
+
+
+def authorize_user_context(context, user_id):
+ """Ensures a request has permission to access the given user."""
+ if is_user_context(context):
+ if not context.user_id:
+ raise exception.NotAuthorized()
+ elif context.user_id != user_id:
+ raise exception.NotAuthorized()
+
+
+def authorize_quota_class_context(context, class_name):
+ """Ensures a request has permission to access the given quota class."""
+ if is_user_context(context):
+ if not context.quota_class:
+ raise exception.NotAuthorized()
+ elif context.quota_class != class_name:
+ raise exception.NotAuthorized()
diff --git a/nova/crypto.py b/nova/crypto.py
index 96e545893..4d72aa624 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -31,13 +31,13 @@ import re
import string
import struct
+from oslo.config import cfg
from pyasn1.codec.der import encoder as der_encoder
from pyasn1.type import univ
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/db/api.py b/nova/db/api.py
index 6ec0b3a95..d14999b45 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -43,17 +43,15 @@ these objects be simple dictionaries.
"""
+from oslo.config import cfg
+
from nova.cells import rpcapi as cells_rpcapi
from nova import exception
-from nova.openstack.common import cfg
+from nova.openstack.common.db import api as db_api
from nova.openstack.common import log as logging
-from nova import utils
db_opts = [
- cfg.StrOpt('db_backend',
- default='sqlalchemy',
- help='The backend to use for db'),
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
@@ -68,8 +66,10 @@ db_opts = [
CONF = cfg.CONF
CONF.register_opts(db_opts)
-IMPL = utils.LazyPluggable('db_backend',
- sqlalchemy='nova.db.sqlalchemy.api')
+_BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'}
+
+
+IMPL = db_api.DBAPI(backend_mapping=_BACKEND_MAPPING)
LOG = logging.getLogger(__name__)
@@ -542,11 +542,6 @@ def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id)
-def virtual_interface_delete(context, vif_id):
- """Delete virtual interface record from the database."""
- return IMPL.virtual_interface_delete(context, vif_id)
-
-
def virtual_interface_delete_by_instance(context, instance_id):
"""Delete virtual interface records associated with instance."""
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
@@ -617,11 +612,6 @@ def instance_get_active_by_window_joined(context, begin, end=None,
project_id, host)
-def instance_get_all_by_project(context, project_id):
- """Get all instances belonging to a project."""
- return IMPL.instance_get_all_by_project(context, project_id)
-
-
def instance_get_all_by_host(context, host):
"""Get all instances belonging to a host."""
return IMPL.instance_get_all_by_host(context, host)
@@ -848,11 +838,6 @@ def network_get_associated_fixed_ips(context, network_id, host=None):
return IMPL.network_get_associated_fixed_ips(context, network_id, host)
-def network_get_by_bridge(context, bridge):
- """Get a network by bridge or raise if it does not exist."""
- return IMPL.network_get_by_bridge(context, bridge)
-
-
def network_get_by_uuid(context, uuid):
"""Get a network by uuid or raise if it does not exist."""
return IMPL.network_get_by_uuid(context, uuid)
@@ -863,11 +848,6 @@ def network_get_by_cidr(context, cidr):
return IMPL.network_get_by_cidr(context, cidr)
-def network_get_by_instance(context, instance_id):
- """Get a network by instance id or raise if it does not exist."""
- return IMPL.network_get_by_instance(context, instance_id)
-
-
def network_get_all_by_instance(context, instance_id):
"""Get all networks by instance id or raise if none exist."""
return IMPL.network_get_all_by_instance(context, instance_id)
@@ -1206,6 +1186,28 @@ def security_group_rule_count_by_group(context, security_group_id):
###################
+def security_group_default_rule_get(context, security_group_rule_default_id):
+ return IMPL.security_group_default_rule_get(context,
+ security_group_rule_default_id)
+
+
+def security_group_default_rule_destroy(context,
+ security_group_rule_default_id):
+ return IMPL.security_group_default_rule_destroy(
+ context, security_group_rule_default_id)
+
+
+def security_group_default_rule_create(context, values):
+ return IMPL.security_group_default_rule_create(context, values)
+
+
+def security_group_default_rule_list(context):
+ return IMPL.security_group_default_rule_list(context)
+
+
+###################
+
+
def provider_fw_rule_create(context, rule):
"""Add a firewall rule at the provider level (all hosts & instances)."""
return IMPL.provider_fw_rule_create(context, rule)
diff --git a/nova/db/base.py b/nova/db/base.py
index 2028e375e..1d8433661 100644
--- a/nova/db/base.py
+++ b/nova/db/base.py
@@ -18,7 +18,8 @@
"""Base class for classes that need modular database access."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import importutils
db_driver_opt = cfg.StrOpt('db_driver',
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index eb9181fce..360bd1b3a 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -23,10 +23,14 @@ import collections
import copy
import datetime
import functools
+import sys
+import time
import uuid
+from oslo.config import cfg
from sqlalchemy import and_
from sqlalchemy import Boolean
+from sqlalchemy import exc as sqla_exc
from sqlalchemy.exc import IntegrityError
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import Integer
@@ -44,10 +48,10 @@ from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
+import nova.context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import log as logging
@@ -74,42 +78,9 @@ get_engine = db_session.get_engine
get_session = db_session.get_session
-def is_user_context(context):
- """Indicates if the request context is a normal user."""
- if not context:
- return False
- if context.is_admin:
- return False
- if not context.user_id or not context.project_id:
- return False
- return True
-
-
-def authorize_project_context(context, project_id):
- """Ensures a request has permission to access the given project."""
- if is_user_context(context):
- if not context.project_id:
- raise exception.NotAuthorized()
- elif context.project_id != project_id:
- raise exception.NotAuthorized()
-
-
-def authorize_user_context(context, user_id):
- """Ensures a request has permission to access the given user."""
- if is_user_context(context):
- if not context.user_id:
- raise exception.NotAuthorized()
- elif context.user_id != user_id:
- raise exception.NotAuthorized()
-
-
-def authorize_quota_class_context(context, class_name):
- """Ensures a request has permission to access the given quota class."""
- if is_user_context(context):
- if not context.quota_class:
- raise exception.NotAuthorized()
- elif context.quota_class != class_name:
- raise exception.NotAuthorized()
+def get_backend():
+ """The backend is this module itself."""
+ return sys.modules[__name__]
def require_admin_context(f):
@@ -120,9 +91,7 @@ def require_admin_context(f):
"""
def wrapper(*args, **kwargs):
- context = args[0]
- if not context.is_admin:
- raise exception.AdminRequired()
+ nova.context.require_admin_context(args[0])
return f(*args, **kwargs)
return wrapper
@@ -131,17 +100,15 @@ def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
- :py:func:`authorize_project_context` and
- :py:func:`authorize_user_context`.
+ :py:func:`nova.context.authorize_project_context` and
+ :py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
- context = args[0]
- if not context.is_admin and not is_user_context(context):
- raise exception.NotAuthorized()
+ nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
@@ -174,6 +141,39 @@ def require_aggregate_exists(f):
return wrapper
+def _retry_on_deadlock(f):
+ """Decorator to retry a DB API call if Deadlock was received."""
+ def _is_deadlock_exc(dberr_info):
+ deadlock_str = 'Deadlock found when trying to get lock'
+ try:
+ if not isinstance(dberr_info, sqla_exc.OperationalError):
+ return False
+ if deadlock_str in dberr_info.message:
+ LOG.warn(_("Deadlock detected when running "
+ "'%(func_name)s': Retrying..."),
+ dict(func_name=f.__name__))
+ return True
+ except Exception:
+ pass
+ return False
+
+ @functools.wraps(f)
+ def wrapped(*args, **kwargs):
+ while True:
+ try:
+ return f(*args, **kwargs)
+ except db_session.DBError as db_err:
+ exc_info = sys.exc_info()
+ dberr_info = db_err.inner_exception
+ if not _is_deadlock_exc(dberr_info):
+ raise exc_info[0], exc_info[1], exc_info[2]
+ # Retry!
+ time.sleep(0.5)
+ continue
+ functools.update_wrapper(wrapped, f)
+ return wrapped
+
+
def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
@@ -215,7 +215,7 @@ def model_query(context, model, *args, **kwargs):
raise Exception(_("Unrecognized read_deleted value '%s'")
% read_deleted)
- if is_user_context(context) and project_only:
+ if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(base_model.project_id == context.project_id,
@@ -658,7 +658,7 @@ def floating_ip_get_pools(context):
@require_context
def floating_ip_allocate_address(context, project_id, pool):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = model_query(context, models.FloatingIp,
@@ -749,7 +749,7 @@ def floating_ip_create(context, values, session=None):
@require_context
def floating_ip_count_by_project(context, project_id, session=None):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no",
session=session).\
@@ -848,7 +848,7 @@ def floating_ip_get_all_by_host(context, host):
@require_context
def floating_ip_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
@@ -879,8 +879,8 @@ def _floating_ip_get_by_address(context, address, session=None):
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
- if result.project_id and is_user_context(context):
- authorize_project_context(context, result.project_id)
+ if result.project_id and nova.context.is_user_context(context):
+ nova.context.authorize_project_context(context, result.project_id)
return result
@@ -1128,10 +1128,11 @@ def fixed_ip_get(context, id, get_network=False):
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
- if is_user_context(context) and result['instance_uuid'] is not None:
+ if (nova.context.is_user_context(context) and
+ result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
- authorize_project_context(context, instance.project_id)
+ nova.context.authorize_project_context(context, instance.project_id)
return result
@@ -1157,11 +1158,12 @@ def fixed_ip_get_by_address(context, address, session=None):
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
- if is_user_context(context) and result['instance_uuid'] is not None:
+ if (nova.context.is_user_context(context) and
+ result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'],
session)
- authorize_project_context(context, instance.project_id)
+ nova.context.authorize_project_context(context, instance.project_id)
return result
@@ -1335,17 +1337,6 @@ def virtual_interface_get_by_instance_and_network(context, instance_uuid,
@require_context
-def virtual_interface_delete(context, vif_id):
- """Delete virtual interface record from the database.
-
- :param vif_id: = id of vif to delete
- """
- _virtual_interface_query(context).\
- filter_by(id=vif_id).\
- delete()
-
-
-@require_context
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
with the instance given by instance_id.
@@ -1716,14 +1707,6 @@ def instance_get_all_by_host_and_not_type(context, host, type_id=None):
@require_context
-def instance_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
- return _instance_get_all_query(context).\
- filter_by(project_id=project_id).\
- all()
-
-
-@require_context
def instance_get_all_by_reservation(context, reservation_id):
return _instance_get_all_query(context, project_only=True).\
filter_by(reservation_id=reservation_id).\
@@ -1860,8 +1843,9 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
raise exception.UnexpectedTaskStateError(actual=actual_state,
expected=expected)
+ instance_hostname = instance_ref['hostname'] or ''
if ("hostname" in values and
- values["hostname"].lower() != instance_ref["hostname"].lower()):
+ values["hostname"].lower() != instance_hostname.lower()):
_validate_unique_server_name(context,
session,
values['hostname'])
@@ -1984,7 +1968,7 @@ def key_pair_create(context, values):
@require_context
def key_pair_destroy(context, user_id, name):
- authorize_user_context(context, user_id)
+ nova.context.authorize_user_context(context, user_id)
model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
@@ -1993,7 +1977,7 @@ def key_pair_destroy(context, user_id, name):
@require_context
def key_pair_get(context, user_id, name):
- authorize_user_context(context, user_id)
+ nova.context.authorize_user_context(context, user_id)
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
@@ -2007,14 +1991,14 @@ def key_pair_get(context, user_id, name):
@require_context
def key_pair_get_all_by_user(context, user_id):
- authorize_user_context(context, user_id)
+ nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
def key_pair_count_by_user(context, user_id):
- authorize_user_context(context, user_id)
+ nova.context.authorize_user_context(context, user_id)
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
@@ -2088,12 +2072,6 @@ def network_count_reserved_ips(context, network_id):
@require_admin_context
def network_create_safe(context, values):
- if values.get('vlan'):
- if model_query(context, models.Network, read_deleted="no")\
- .filter_by(vlan=values['vlan'])\
- .first():
- raise exception.DuplicateVlan(vlan=values['vlan'])
-
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
@@ -2101,8 +2079,8 @@ def network_create_safe(context, values):
try:
network_ref.save()
return network_ref
- except IntegrityError:
- return None
+ except db_session.DBDuplicateEntry:
+ raise exception.DuplicateVlan(vlan=values['vlan'])
@require_admin_context
@@ -2255,16 +2233,6 @@ def _network_get_query(context, session=None):
@require_admin_context
-def network_get_by_bridge(context, bridge):
- result = _network_get_query(context).filter_by(bridge=bridge).first()
-
- if not result:
- raise exception.NetworkNotFoundForBridge(bridge=bridge)
-
- return result
-
-
-@require_admin_context
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
@@ -2288,20 +2256,6 @@ def network_get_by_cidr(context, cidr):
@require_admin_context
-def network_get_by_instance(context, instance_id):
- # note this uses fixed IP to get to instance
- # only works for networks the instance has an IP from
- result = _network_get_query(context).\
- filter_by(instance_id=instance_id).\
- first()
-
- if not result:
- raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
-
- return result
-
-
-@require_admin_context
def network_get_all_by_instance(context, instance_id):
result = _network_get_query(context).\
filter_by(instance_id=instance_id).\
@@ -2365,7 +2319,10 @@ def network_update(context, network_id, values):
with session.begin():
network_ref = network_get(context, network_id, session=session)
network_ref.update(values)
- network_ref.save(session=session)
+ try:
+ network_ref.save(session=session)
+ except db_session.DBDuplicateEntry:
+ raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
@@ -2410,7 +2367,7 @@ def quota_get(context, project_id, resource):
@require_context
def quota_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
@@ -2462,7 +2419,7 @@ def quota_class_get(context, class_name, resource):
@require_context
def quota_class_get_all_by_name(context, class_name):
- authorize_quota_class_context(context, class_name)
+ nova.context.authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
@@ -2514,7 +2471,7 @@ def quota_usage_get(context, project_id, resource):
@require_context
def quota_usage_get_all_by_project(context, project_id):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
@@ -3180,6 +3137,16 @@ def security_group_ensure_default(context, session=None):
'project_id': context.project_id}
default_group = security_group_create(context, values,
session=session)
+ for default_rule in security_group_default_rule_list(context):
+ # This is suboptimal, it should be programmatic to know
+ # the values of the default_rule
+ rule_values = {'protocol': default_rule.protocol,
+ 'from_port': default_rule.from_port,
+ 'to_port': default_rule.to_port,
+ 'cidr': default_rule.cidr,
+ 'parent_group_id': default_group.id,
+ }
+ security_group_rule_create(context, rule_values)
return (False, default_group)
@@ -3203,7 +3170,7 @@ def security_group_destroy(context, security_group_id):
@require_context
def security_group_count_by_project(context, project_id, session=None):
- authorize_project_context(context, project_id)
+ nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
@@ -3235,7 +3202,8 @@ def security_group_rule_get_by_security_group(context, security_group_id,
session=None):
return _security_group_rule_get_query(context, session=session).\
filter_by(parent_group_id=security_group_id).\
- options(joinedload_all('grantee_group.instances.instance_type')).\
+ options(joinedload_all('grantee_group.instances.'
+ 'system_metadata')).\
all()
@@ -3280,6 +3248,56 @@ def security_group_rule_count_by_group(context, security_group_id):
###################
+def _security_group_rule_get_default_query(context, session=None):
+ return model_query(context, models.SecurityGroupIngressDefaultRule,
+ session=session)
+
+
+@require_context
+def security_group_default_rule_get(context, security_group_rule_default_id,
+ session=None):
+ result = _security_group_rule_get_default_query(context, session=session).\
+ filter_by(id=security_group_rule_default_id).\
+ first()
+
+ if not result:
+ raise exception.SecurityGroupDefaultRuleNotFound(
+ rule_id=security_group_rule_default_id)
+
+ return result
+
+
+@require_admin_context
+def security_group_default_rule_destroy(context,
+ security_group_rule_default_id):
+ session = get_session()
+ with session.begin():
+ count = _security_group_rule_get_default_query(context,
+ session=session).\
+ filter_by(id=security_group_rule_default_id).\
+ soft_delete()
+ if count == 0:
+ raise exception.SecurityGroupDefaultRuleNotFound(
+ rule_id=security_group_rule_default_id)
+
+
+@require_admin_context
+def security_group_default_rule_create(context, values):
+ security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
+ security_group_default_rule_ref.update(values)
+ security_group_default_rule_ref.save()
+ return security_group_default_rule_ref
+
+
+@require_context
+def security_group_default_rule_list(context, session=None):
+ return _security_group_rule_get_default_query(context, session=session).\
+ all()
+
+
+###################
+
+
@require_admin_context
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
@@ -3788,20 +3806,6 @@ def instance_metadata_delete(context, instance_uuid, key):
@require_context
-def instance_metadata_get_item(context, instance_uuid, key, session=None):
- result = _instance_metadata_get_query(
- context, instance_uuid, session=session).\
- filter_by(key=key).\
- first()
-
- if not result:
- raise exception.InstanceMetadataNotFound(metadata_key=key,
- instance_uuid=instance_uuid)
-
- return result
-
-
-@require_context
def instance_metadata_update(context, instance_uuid, metadata, delete,
session=None):
all_keys = metadata.keys()
@@ -3858,20 +3862,6 @@ def instance_system_metadata_get(context, instance_uuid, session=None):
return result
-def _instance_system_metadata_get_item(context, instance_uuid, key,
- session=None):
- result = _instance_system_metadata_get_query(
- context, instance_uuid, session=session).\
- filter_by(key=key).\
- first()
-
- if not result:
- raise exception.InstanceSystemMetadataNotFound(
- metadata_key=key, instance_uuid=instance_uuid)
-
- return result
-
-
@require_context
def instance_system_metadata_update(context, instance_uuid, metadata, delete,
session=None):
@@ -3986,6 +3976,7 @@ def bw_usage_get_by_uuids(context, uuids, start_period):
@require_context
+@_retry_on_deadlock
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None,
session=None):
@@ -4065,20 +4056,6 @@ def instance_type_extra_specs_delete(context, flavor_id, key):
@require_context
-def instance_type_extra_specs_get_item(context, flavor_id, key,
- session=None):
- result = _instance_type_extra_specs_get_query(
- context, flavor_id, session=session).\
- filter(models.InstanceTypeExtraSpecs.key == key).\
- first()
- if not result:
- raise exception.InstanceTypeExtraSpecsNotFound(
- extra_specs_key=key, instance_type_id=flavor_id)
-
- return result
-
-
-@require_context
def instance_type_extra_specs_update_or_create(context, flavor_id, specs):
# NOTE(boris-42): There is a race condition in this method. We should add
# UniqueConstraint on (instance_type_id, key, deleted) to
@@ -4427,23 +4404,6 @@ def aggregate_metadata_delete(context, aggregate_id, key):
@require_admin_context
@require_aggregate_exists
-def aggregate_metadata_get_item(context, aggregate_id, key, session=None):
- result = _aggregate_get_query(context,
- models.AggregateMetadata,
- models.AggregateMetadata.aggregate_id,
- aggregate_id, session=session,
- read_deleted='yes').\
- filter_by(key=key).first()
-
- if not result:
- raise exception.AggregateMetadataNotFound(metadata_key=key,
- aggregate_id=aggregate_id)
-
- return result
-
-
-@require_admin_context
-@require_aggregate_exists
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
# NOTE(boris-42): There is a race condition in this method. We should add
# UniqueConstraint on (start_period, uuid, mac, deleted) to
@@ -4756,27 +4716,21 @@ def task_log_get_all(context, task_name, period_beginning, period_ending,
@require_admin_context
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
- # NOTE(boris-42): This method has a race condition and will be rewritten
- # after bp/db-unique-keys implementation.
- session = get_session()
- with session.begin():
- task_ref = _task_log_get_query(context, task_name, period_beginning,
- period_ending, host, session=session).\
- first()
- if task_ref:
- #It's already run(ning)!
- raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
- task = models.TaskLog()
- task.task_name = task_name
- task.period_beginning = period_beginning
- task.period_ending = period_ending
- task.host = host
- task.state = "RUNNING"
- if message:
- task.message = message
- if task_items:
- task.task_items = task_items
- task.save(session=session)
+
+ task = models.TaskLog()
+ task.task_name = task_name
+ task.period_beginning = period_beginning
+ task.period_ending = period_ending
+ task.host = host
+ task.state = "RUNNING"
+ if message:
+ task.message = message
+ if task_items:
+ task.task_items = task_items
+ try:
+ task.save()
+ except db_session.DBDuplicateEntry:
+ raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
@require_admin_context
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/149_inet_datatype_for_postgres.py b/nova/db/sqlalchemy/migrate_repo/versions/149_inet_datatype_for_postgres.py
index fe9889e35..3eb26ac7d 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/149_inet_datatype_for_postgres.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/149_inet_datatype_for_postgres.py
@@ -58,7 +58,7 @@ def upgrade(migrate_engine):
else:
for table, column in TABLE_COLUMNS:
t = Table(table, meta, autoload=True)
- getattr(t.c, column).alter(type=String(39))
+ getattr(t.c, column).alter(type=String(43))
def downgrade(migrate_engine):
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/155_add_task_log_uc.py b/nova/db/sqlalchemy/migrate_repo/versions/155_add_task_log_uc.py
new file mode 100644
index 000000000..04e033544
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/155_add_task_log_uc.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy import MetaData, Table
+
+from nova.db.sqlalchemy import utils
+
+
+UC_NAME = "uniq_task_name_x_host_x_period_beginning_x_period_ending"
+COLUMNS = ('task_name', 'host', 'period_beginning', 'period_ending')
+TABLE_NAME = 'task_log'
+
+
+def upgrade(migrate_engine):
+ meta = MetaData(bind=migrate_engine)
+ t = Table(TABLE_NAME, meta, autoload=True)
+
+ utils.drop_old_duplicate_entries_from_table(migrate_engine, TABLE_NAME,
+ False, *COLUMNS)
+ uc = UniqueConstraint(*COLUMNS, table=t, name=UC_NAME)
+ uc.create()
+
+
+def downgrade(migrate_engine):
+ utils.drop_unique_constraint(migrate_engine, TABLE_NAME, UC_NAME, *COLUMNS)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/156_cidr_column_length.py b/nova/db/sqlalchemy/migrate_repo/versions/156_cidr_column_length.py
new file mode 100644
index 000000000..fda0c5075
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/156_cidr_column_length.py
@@ -0,0 +1,56 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, String, Table
+from sqlalchemy.dialects import postgresql
+
+
+CIDR_TABLE_COLUMNS = [
+ # table name, column name
+ ('security_group_rules', 'cidr'),
+ ('provider_fw_rules', 'cidr'),
+ ('networks', 'cidr'),
+ ('networks', 'cidr_v6')]
+
+
+def upgrade(migrate_engine):
+ """Convert String columns holding IP addresses to INET for postgresql."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+ dialect = migrate_engine.url.get_dialect()
+
+ if dialect is postgresql.dialect:
+ for table, column in CIDR_TABLE_COLUMNS:
+ # can't use migrate's alter() because it does not support
+ # explicit casting
+ migrate_engine.execute(
+ "ALTER TABLE %(table)s "
+ "ALTER COLUMN %(column)s TYPE INET USING %(column)s::INET"
+ % locals())
+ else:
+ for table, column in CIDR_TABLE_COLUMNS:
+ t = Table(table, meta, autoload=True)
+ getattr(t.c, column).alter(type=String(43))
+
+
+def downgrade(migrate_engine):
+ """Convert columns back to the larger String(255)."""
+ meta = MetaData()
+ meta.bind = migrate_engine
+ for table, column in CIDR_TABLE_COLUMNS:
+ t = Table(table, meta, autoload=True)
+ getattr(t.c, column).alter(type=String(39))
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/157_add_security_group_default_rules.py b/nova/db/sqlalchemy/migrate_repo/versions/157_add_security_group_default_rules.py
new file mode 100644
index 000000000..5dcfdbb90
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/157_add_security_group_default_rules.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table
+from nova.db.sqlalchemy import types
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ security_group_default_rules = Table('security_group_default_rules', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Integer, default=0),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('protocol', String(length=5)),
+ Column('from_port', Integer),
+ Column('to_port', Integer),
+ Column('cidr', types.CIDR()),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ try:
+ security_group_default_rules.create()
+ except Exception:
+        msg = "Exception while creating table 'security_group_default_rules'"
+ LOG.exception(msg)
+ raise
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ security_group_default_rules = Table('security_group_default_rules',
+ meta,
+ autoload=True)
+ try:
+ security_group_default_rules.drop()
+ except Exception:
+        msg = "Exception while dropping table 'security_group_default_rules'"
+ LOG.exception(msg)
+ raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/158_add_networks_uc.py b/nova/db/sqlalchemy/migrate_repo/versions/158_add_networks_uc.py
new file mode 100644
index 000000000..18644d140
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/158_add_networks_uc.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy import MetaData, Table
+
+from nova.db.sqlalchemy import utils
+
+
+UC_NAME = "uniq_vlan_x_deleted"
+COLUMNS = ('vlan', 'deleted')
+TABLE_NAME = 'networks'
+
+
+def upgrade(migrate_engine):
+ meta = MetaData(bind=migrate_engine)
+ t = Table(TABLE_NAME, meta, autoload=True)
+
+ utils.drop_old_duplicate_entries_from_table(migrate_engine, TABLE_NAME,
+ True, *COLUMNS)
+ uc = UniqueConstraint(*COLUMNS, table=t, name=UC_NAME)
+ uc.create()
+
+
+def downgrade(migrate_engine):
+ utils.drop_unique_constraint(migrate_engine, TABLE_NAME, UC_NAME, *COLUMNS)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 5eeae30dc..95456bf98 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -25,13 +25,12 @@ from sqlalchemy import Column, Integer, BigInteger, String, schema
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
+from oslo.config import cfg
from nova.db.sqlalchemy import types
-from nova.openstack.common import cfg
from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common import timeutils
-
CONF = cfg.CONF
BASE = declarative_base()
@@ -529,7 +528,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
- cidr = Column(types.IPAddress())
+ cidr = Column(types.CIDR())
# Note: This is not the parent SecurityGroup. It's SecurityGroup we're
# granting access for.
@@ -541,6 +540,15 @@ class SecurityGroupIngressRule(BASE, NovaBase):
'SecurityGroupIngressRule.deleted == 0)')
+class SecurityGroupIngressDefaultRule(BASE, NovaBase):
+ __tablename__ = 'security_group_default_rules'
+ id = Column(Integer, primary_key=True)
+ protocol = Column(String(5)) # "tcp", "udp" or "icmp"
+ from_port = Column(Integer)
+ to_port = Column(Integer)
+ cidr = Column(types.CIDR())
+
+
class ProviderFirewallRule(BASE, NovaBase):
"""Represents a rule in a security group."""
__tablename__ = 'provider_fw_rules'
@@ -549,7 +557,7 @@ class ProviderFirewallRule(BASE, NovaBase):
protocol = Column(String(5)) # "tcp", "udp", or "icmp"
from_port = Column(Integer)
to_port = Column(Integer)
- cidr = Column(types.IPAddress())
+ cidr = Column(types.CIDR())
class KeyPair(BASE, NovaBase):
@@ -599,8 +607,8 @@ class Network(BASE, NovaBase):
label = Column(String(255))
injected = Column(Boolean, default=False)
- cidr = Column(types.IPAddress(), unique=True)
- cidr_v6 = Column(types.IPAddress(), unique=True)
+ cidr = Column(types.CIDR(), unique=True)
+ cidr_v6 = Column(types.CIDR(), unique=True)
multi_host = Column(Boolean, default=False)
gateway_v6 = Column(types.IPAddress())
diff --git a/nova/db/sqlalchemy/types.py b/nova/db/sqlalchemy/types.py
index ef861b832..5a04a0253 100644
--- a/nova/db/sqlalchemy/types.py
+++ b/nova/db/sqlalchemy/types.py
@@ -36,3 +36,15 @@ class IPAddress(types.TypeDecorator):
elif utils.is_valid_ipv6(value):
return utils.get_shortened_ipv6(value)
return value
+
+
+class CIDR(types.TypeDecorator):
+ """An SQLAlchemy type representing a CIDR definition."""
+ impl = types.String(43).with_variant(postgresql.INET(), 'postgresql')
+
+ def process_bind_param(self, value, dialect):
+ """Process/Formats the value before insert it into the db."""
+ # NOTE(sdague): normalize all the inserts
+ if utils.is_valid_ipv6_cidr(value):
+ return utils.get_shortened_ipv6_cidr(value)
+ return value
diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py
index 2faa5021f..f8ccbb6f2 100644
--- a/nova/db/sqlalchemy/utils.py
+++ b/nova/db/sqlalchemy/utils.py
@@ -18,11 +18,19 @@
from migrate.changeset import UniqueConstraint
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
+from sqlalchemy import func
from sqlalchemy import MetaData, Table, Column, Index
-from sqlalchemy.sql.expression import UpdateBase
+from sqlalchemy.sql.expression import UpdateBase, literal_column
+from sqlalchemy.sql import select
from sqlalchemy.types import NullType
+
from nova import exception
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
class InsertFromSelect(UpdateBase):
@@ -98,10 +106,10 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
4) Rename new table to the name of old table.
:param migrate_engine: sqlalchemy engine
- :oaram table_name: name of table that contains uniq constarint.
+    :param table_name: name of table that contains uniq constraint.
:param uc_name: name of uniq constraint that will be dropped.
:param columns: columns that are in uniq constarint.
- :param col_name_col_instance: constains pair column_name=column_instance.
+ :param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
@@ -110,8 +118,51 @@ def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
- uc = UniqueConstraint(*fields, table=t, name=uc_name)
+ uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
else:
_drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
**col_name_col_instance)
+
+
+def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
+ use_soft_delete, *uc_column_names):
+ """
+ This method is used to drop all old rows that have the same values for
+ columns in uc_columns.
+ """
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ table = Table(table_name, meta, autoload=True)
+ columns_for_group_by = [table.c[name] for name in uc_column_names]
+
+ columns_for_select = [func.max(table.c.id)]
+ columns_for_select.extend(list(columns_for_group_by))
+
+ duplicated_rows_select = select(columns_for_select,
+ group_by=columns_for_group_by,
+ having=func.count(table.c.id) > 1)
+
+ for row in migrate_engine.execute(duplicated_rows_select):
+ # NOTE(boris-42): Do not remove row that has the biggest ID.
+ delete_condition = table.c.id != row[0]
+ for name in uc_column_names:
+ delete_condition &= table.c[name] == row[name]
+
+ rows_to_delete_select = select([table.c.id]).where(delete_condition)
+ for row in migrate_engine.execute(rows_to_delete_select).fetchall():
+ LOG.info(_("Deleted duplicated row with id: %(id)s from table: "
+ "%(table)s") % dict(id=row[0], table=table_name))
+
+ if use_soft_delete:
+ delete_statement = table.update().\
+ where(delete_condition).\
+ values({
+ 'deleted': literal_column('id'),
+ 'updated_at': literal_column('updated_at'),
+ 'deleted_at': timeutils.utcnow()
+ })
+ else:
+ delete_statement = table.delete().where(delete_condition)
+ migrate_engine.execute(delete_statement)
diff --git a/nova/exception.py b/nova/exception.py
index 9e9e5182b..cad1c5c02 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -26,9 +26,9 @@ SHOULD include dedicated exception logging.
import functools
+from oslo.config import cfg
import webob.exc
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
@@ -335,6 +335,10 @@ class InstanceTerminationFailure(Invalid):
message = _("Failed to terminate instance") + ": %(reason)s"
+class InstanceDeployFailure(Invalid):
+ message = _("Failed to deploy instance") + ": %(reason)s"
+
+
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
@@ -496,6 +500,10 @@ class NetworkNotFound(NotFound):
message = _("Network %(network_id)s could not be found.")
+class PortNotFound(NotFound):
+ message = _("Port id %(port_id)s could not be found.")
+
+
class NetworkNotFoundForBridge(NetworkNotFound):
message = _("Network could not be found for bridge %(bridge)s")
@@ -529,10 +537,6 @@ class PortInUse(NovaException):
message = _("Port %(port_id)s is still in use.")
-class PortNotFound(NotFound):
- message = _("Port %(port_id)s could not be found.")
-
-
class PortNotUsable(NovaException):
message = _("Port %(port_id)s not usable for instance %(instance)s.")
@@ -732,6 +736,20 @@ class SecurityGroupNotExistsForInstance(Invalid):
" the instance %(instance_id)s")
+class SecurityGroupDefaultRuleNotFound(Invalid):
+    message = _("Security group default rule %(rule_id)s not found.")
+
+
+class SecurityGroupCannotBeApplied(Invalid):
+ message = _("Network requires port_security_enabled and subnet associated"
+ " in order to apply security groups.")
+
+
+class NoUniqueMatch(NovaException):
+ message = _("No Unique Match Found.")
+ code = 409
+
+
class MigrationNotFound(NotFound):
message = _("Migration %(migration_id)s could not be found.")
@@ -1042,6 +1060,14 @@ class InstanceNotFound(NotFound):
message = _("Instance %(instance_id)s could not be found.")
+class NodeNotFound(NotFound):
+ message = _("Node %(node_id)s could not be found.")
+
+
+class NodeNotFoundByUUID(NotFound):
+ message = _("Node with UUID %(node_uuid)s could not be found.")
+
+
class MarkerNotFound(NotFound):
message = _("Marker %(marker)s could not be found.")
@@ -1076,6 +1102,14 @@ class ConfigDriveUnknownFormat(NovaException):
"iso9660 or vfat.")
+class InterfaceAttachFailed(Invalid):
+ message = _("Failed to attach network adapter device to %(instance)s")
+
+
+class InterfaceDetachFailed(Invalid):
+ message = _("Failed to detach network adapter device from %(instance)s")
+
+
class InstanceUserDataTooLarge(NovaException):
message = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 78cfc3dee..79614d6a7 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -29,9 +29,9 @@ import urlparse
import glanceclient
import glanceclient.exc
+from oslo.config import cfg
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 833fb44f9..100d80030 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -28,12 +28,12 @@ import tempfile
import boto.s3.connection
import eventlet
from lxml import etree
+from oslo.config import cfg
from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
from nova import exception
from nova.image import glance
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/ipv6/api.py b/nova/ipv6/api.py
index 43a185101..7f94abcfb 100644
--- a/nova/ipv6/api.py
+++ b/nova/ipv6/api.py
@@ -14,9 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
-from nova import utils
+from oslo.config import cfg
+from nova import utils
ipv6_backend_opt = cfg.StrOpt('ipv6_backend',
default='rfc2462',
diff --git a/nova/locale/bs/LC_MESSAGES/nova.po b/nova/locale/bs/LC_MESSAGES/nova.po
index 1c828e2cc..a8c2f28aa 100644
--- a/nova/locale/bs/LC_MESSAGES/nova.po
+++ b/nova/locale/bs/LC_MESSAGES/nova.po
@@ -4105,7 +4105,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/cs/LC_MESSAGES/nova.po b/nova/locale/cs/LC_MESSAGES/nova.po
index 5f16b0a97..728eb3e2f 100644
--- a/nova/locale/cs/LC_MESSAGES/nova.po
+++ b/nova/locale/cs/LC_MESSAGES/nova.po
@@ -4274,7 +4274,7 @@ msgstr "Pid %d je starý, znovu spouštění radvd"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "Spouštění rozhraní VLAN %s"
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/da/LC_MESSAGES/nova.po b/nova/locale/da/LC_MESSAGES/nova.po
index 9110f1a19..c5bf005ae 100644
--- a/nova/locale/da/LC_MESSAGES/nova.po
+++ b/nova/locale/da/LC_MESSAGES/nova.po
@@ -4105,7 +4105,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/de/LC_MESSAGES/nova.po b/nova/locale/de/LC_MESSAGES/nova.po
index 7648cb6b7..90888108b 100644
--- a/nova/locale/de/LC_MESSAGES/nova.po
+++ b/nova/locale/de/LC_MESSAGES/nova.po
@@ -4114,7 +4114,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/en_AU/LC_MESSAGES/nova.po b/nova/locale/en_AU/LC_MESSAGES/nova.po
index 34fe02f2f..85439f90d 100644
--- a/nova/locale/en_AU/LC_MESSAGES/nova.po
+++ b/nova/locale/en_AU/LC_MESSAGES/nova.po
@@ -4146,8 +4146,8 @@ msgstr "Pid %d is stale, relaunching radvd"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
-msgstr "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
+msgstr "Starting VLAN interface %s"
#: nova/network/linux_net.py:1162
#, python-format
diff --git a/nova/locale/en_GB/LC_MESSAGES/nova.po b/nova/locale/en_GB/LC_MESSAGES/nova.po
index c4d5733bf..8578686a7 100644
--- a/nova/locale/en_GB/LC_MESSAGES/nova.po
+++ b/nova/locale/en_GB/LC_MESSAGES/nova.po
@@ -4128,8 +4128,8 @@ msgstr "Pid %d is stale, relaunching radvd"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
-msgstr "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
+msgstr "Starting VLAN interface %s"
#: nova/network/linux_net.py:1162
#, python-format
diff --git a/nova/locale/en_US/LC_MESSAGES/nova.po b/nova/locale/en_US/LC_MESSAGES/nova.po
index 74873ef96..1c33f1050 100644
--- a/nova/locale/en_US/LC_MESSAGES/nova.po
+++ b/nova/locale/en_US/LC_MESSAGES/nova.po
@@ -4228,8 +4228,8 @@ msgstr "Pid %d is stale, relaunching radvd"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
-msgstr "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
+msgstr "Starting VLAN interface %s"
#: nova/network/linux_net.py:1162
#, python-format
diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po
index 4b51385c1..4f4881583 100644
--- a/nova/locale/es/LC_MESSAGES/nova.po
+++ b/nova/locale/es/LC_MESSAGES/nova.po
@@ -4156,7 +4156,7 @@ msgstr "Pid %d corrupto, relanzando radvd"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "Iniciando interfaz VLAN %s"
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/fr/LC_MESSAGES/nova.po b/nova/locale/fr/LC_MESSAGES/nova.po
index 5292926e8..d775eb199 100644
--- a/nova/locale/fr/LC_MESSAGES/nova.po
+++ b/nova/locale/fr/LC_MESSAGES/nova.po
@@ -4211,7 +4211,7 @@ msgstr "Pid %d est dépassé, re-démarrage radvd"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "Démarrage de l'interface VLAN %s"
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/it/LC_MESSAGES/nova.po b/nova/locale/it/LC_MESSAGES/nova.po
index 53a40b994..5427d247c 100644
--- a/nova/locale/it/LC_MESSAGES/nova.po
+++ b/nova/locale/it/LC_MESSAGES/nova.po
@@ -4125,7 +4125,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "Avviando l'interfaccia VLAN %s"
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/ja/LC_MESSAGES/nova.po b/nova/locale/ja/LC_MESSAGES/nova.po
index b97544a9a..0e3e5346e 100644
--- a/nova/locale/ja/LC_MESSAGES/nova.po
+++ b/nova/locale/ja/LC_MESSAGES/nova.po
@@ -4144,7 +4144,7 @@ msgstr "Pid %d がストールしているので radvd を再実行しています
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "VLANインタフェース %s を開始します。"
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/ko/LC_MESSAGES/nova.po b/nova/locale/ko/LC_MESSAGES/nova.po
index a18f08a79..cb7d925b9 100644
--- a/nova/locale/ko/LC_MESSAGES/nova.po
+++ b/nova/locale/ko/LC_MESSAGES/nova.po
@@ -4116,7 +4116,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/nb/LC_MESSAGES/nova.po b/nova/locale/nb/LC_MESSAGES/nova.po
index 21004f601..d03094228 100644
--- a/nova/locale/nb/LC_MESSAGES/nova.po
+++ b/nova/locale/nb/LC_MESSAGES/nova.po
@@ -4111,7 +4111,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 6cfd4c904..743550929 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova jenkins.nova.propose.translation.update.157\n"
+"Project-Id-Version: nova jenkins.nova.propose.translation.update.162\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2013-02-13 00:02+0000\n"
+"POT-Creation-Date: 2013-02-18 00:03+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -175,7 +175,7 @@ msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
#: nova/exception.py:234 nova/api/ec2/cloud.py:461
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2634
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2639
msgid "Keypair data is invalid"
msgstr ""
@@ -191,7 +191,7 @@ msgstr ""
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:250 nova/api/openstack/compute/servers.py:1338
+#: nova/exception.py:250 nova/api/openstack/compute/servers.py:1331
#: nova/api/openstack/compute/contrib/admin_actions.py:242
msgid "Invalid metadata"
msgstr ""
@@ -303,7 +303,8 @@ msgid "Insufficient compute resources."
msgstr ""
#: nova/exception.py:347
-msgid "Compute service is unavailable at this time."
+#, python-format
+msgid "Compute service of %(host)s is unavailable at this time."
msgstr ""
#: nova/exception.py:351
@@ -1206,6 +1207,18 @@ msgstr ""
msgid "%(binary)s attempted direct database access which is not allowed by policy"
msgstr ""
+#: nova/exception.py:1126
+#, python-format
+msgid "Virtualization type '%(virt)s' is not supported by this compute driver"
+msgstr ""
+
+#: nova/exception.py:1131
+#, python-format
+msgid ""
+"Requested hardware '%(model)s' is not supported by the '%(virt)s' virt "
+"driver"
+msgstr ""
+
#: nova/hooks.py:62
#, python-format
msgid "Running %(name)s pre-hook: %(obj)s"
@@ -1380,7 +1393,7 @@ msgstr ""
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:231 nova/utils.py:309 nova/virt/powervm/common.py:83
+#: nova/utils.py:231 nova/utils.py:309 nova/virt/powervm/common.py:86
#, python-format
msgid "Result was %s"
msgstr ""
@@ -1451,21 +1464,36 @@ msgstr ""
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:978
+#: nova/utils.py:987
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:1044
+#: nova/utils.py:1053
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1176 nova/virt/configdrive.py:176
+#: nova/utils.py:1185 nova/virt/configdrive.py:176
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
+#: nova/utils.py:1363
+#, python-format
+msgid "%s is not a string or unicode"
+msgstr ""
+
+#: nova/utils.py:1367
+#, python-format
+msgid "%(name)s has less than %(min_length)s characters."
+msgstr ""
+
+#: nova/utils.py:1372
+#, python-format
+msgid "%(name)s has more than %(max_length)s characters."
+msgstr ""
+
#: nova/wsgi.py:121
#, python-format
msgid "%(name)s listening on %(host)s:%(port)s"
@@ -2305,233 +2333,218 @@ msgid "Flavor '%s' could not be found "
msgstr ""
#: nova/api/openstack/compute/servers.py:560
-#: nova/api/openstack/compute/servers.py:738
-#: nova/api/openstack/compute/servers.py:1010
-#: nova/api/openstack/compute/servers.py:1116
-#: nova/api/openstack/compute/servers.py:1289
+#: nova/api/openstack/compute/servers.py:731
+#: nova/api/openstack/compute/servers.py:1003
+#: nova/api/openstack/compute/servers.py:1109
+#: nova/api/openstack/compute/servers.py:1282
msgid "Instance could not be found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:567
-#, python-format
-msgid "%s is not a string or unicode"
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:571
-#, python-format
-msgid "%s is an empty string"
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:575
-#, python-format
-msgid "%(name)s can be at most %(max_length)s characters."
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:586
+#: nova/api/openstack/compute/servers.py:579
msgid "Device name cannot include spaces."
msgstr ""
-#: nova/api/openstack/compute/servers.py:603
+#: nova/api/openstack/compute/servers.py:596
#, python-format
msgid "Bad personality format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:606
+#: nova/api/openstack/compute/servers.py:599
msgid "Bad personality format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:609
+#: nova/api/openstack/compute/servers.py:602
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:640
+#: nova/api/openstack/compute/servers.py:633
msgid "Unknown argment : port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:643
+#: nova/api/openstack/compute/servers.py:636
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:653
+#: nova/api/openstack/compute/servers.py:646
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:663
+#: nova/api/openstack/compute/servers.py:656
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:676
+#: nova/api/openstack/compute/servers.py:669
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr ""
-#: nova/api/openstack/compute/servers.py:682
+#: nova/api/openstack/compute/servers.py:675
#, python-format
msgid "Bad network format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:685
+#: nova/api/openstack/compute/servers.py:678
msgid "Bad networks format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:711
+#: nova/api/openstack/compute/servers.py:704
msgid "Userdata content cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:718
+#: nova/api/openstack/compute/servers.py:711
msgid "accessIPv4 is not proper IPv4 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:725
+#: nova/api/openstack/compute/servers.py:718
msgid "accessIPv6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:754
+#: nova/api/openstack/compute/servers.py:747
msgid "Server name is not defined"
msgstr ""
-#: nova/api/openstack/compute/servers.py:802
-#: nova/api/openstack/compute/servers.py:916
+#: nova/api/openstack/compute/servers.py:795
+#: nova/api/openstack/compute/servers.py:909
msgid "Invalid flavorRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:843
+#: nova/api/openstack/compute/servers.py:836
msgid "min_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:846
+#: nova/api/openstack/compute/servers.py:839
msgid "min_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:852
+#: nova/api/openstack/compute/servers.py:845
msgid "max_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:855
+#: nova/api/openstack/compute/servers.py:848
msgid "max_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:859
+#: nova/api/openstack/compute/servers.py:852
msgid "min_count must be <= max_count"
msgstr ""
-#: nova/api/openstack/compute/servers.py:911
+#: nova/api/openstack/compute/servers.py:904
msgid "Can not find requested image"
msgstr ""
-#: nova/api/openstack/compute/servers.py:919
+#: nova/api/openstack/compute/servers.py:912
msgid "Invalid key_name provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:998
+#: nova/api/openstack/compute/servers.py:991
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1002
+#: nova/api/openstack/compute/servers.py:995
msgid "Personality cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1028
-#: nova/api/openstack/compute/servers.py:1048
+#: nova/api/openstack/compute/servers.py:1021
+#: nova/api/openstack/compute/servers.py:1041
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1034
+#: nova/api/openstack/compute/servers.py:1027
#, python-format
msgid "Error in confirm-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1051
+#: nova/api/openstack/compute/servers.py:1044
msgid "Flavor used by the instance could not be found."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1057
+#: nova/api/openstack/compute/servers.py:1050
#, python-format
msgid "Error in revert-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1070
+#: nova/api/openstack/compute/servers.py:1063
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1074
+#: nova/api/openstack/compute/servers.py:1067
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1087
+#: nova/api/openstack/compute/servers.py:1080
#, python-format
msgid "Error in reboot %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1099
+#: nova/api/openstack/compute/servers.py:1092
msgid "Unable to locate requested flavor."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1102
+#: nova/api/openstack/compute/servers.py:1095
msgid "Resize requires a flavor change."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1126
+#: nova/api/openstack/compute/servers.py:1119
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1135
+#: nova/api/openstack/compute/servers.py:1128
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1162
+#: nova/api/openstack/compute/servers.py:1155
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1175
+#: nova/api/openstack/compute/servers.py:1168
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1179
-#: nova/api/openstack/compute/servers.py:1386
+#: nova/api/openstack/compute/servers.py:1172
+#: nova/api/openstack/compute/servers.py:1379
msgid "Invalid adminPass"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1185
+#: nova/api/openstack/compute/servers.py:1178
msgid "Unable to set password on instance"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1194
+#: nova/api/openstack/compute/servers.py:1187
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1207
+#: nova/api/openstack/compute/servers.py:1200
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1210
+#: nova/api/openstack/compute/servers.py:1203
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1228
+#: nova/api/openstack/compute/servers.py:1221
#: nova/api/openstack/compute/contrib/aggregates.py:143
#: nova/api/openstack/compute/contrib/coverage_ext.py:256
#: nova/api/openstack/compute/contrib/keypairs.py:78
msgid "Invalid request body"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1234
+#: nova/api/openstack/compute/servers.py:1227
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1296
+#: nova/api/openstack/compute/servers.py:1289
msgid "Cannot find image for rebuild"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1329
+#: nova/api/openstack/compute/servers.py:1322
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1413
+#: nova/api/openstack/compute/servers.py:1406
#, python-format
msgid "Removing options '%(unk_opt_str)s' from query"
msgstr ""
@@ -2569,7 +2582,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/admin_actions.py:154
#: nova/api/openstack/compute/contrib/admin_actions.py:170
#: nova/api/openstack/compute/contrib/admin_actions.py:186
-#: nova/api/openstack/compute/contrib/admin_actions.py:312
+#: nova/api/openstack/compute/contrib/admin_actions.py:314
#: nova/api/openstack/compute/contrib/multinic.py:41
#: nova/api/openstack/compute/contrib/rescue.py:44
msgid "Server not found"
@@ -2618,17 +2631,17 @@ msgstr ""
msgid "host and block_migration must be specified."
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:286
+#: nova/api/openstack/compute/contrib/admin_actions.py:288
#, python-format
msgid "Live migration of instance %(id)s to host %(host)s failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:304
+#: nova/api/openstack/compute/contrib/admin_actions.py:306
#, python-format
msgid "Desired state must be specified. Valid states are: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:315
+#: nova/api/openstack/compute/contrib/admin_actions.py:317
#, python-format
msgid "Compute.api::resetState %s"
msgstr ""
@@ -3069,10 +3082,15 @@ msgstr ""
msgid "Create networks failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/quotas.py:63
+#: nova/api/openstack/compute/contrib/quotas.py:65
msgid "Quota limit must be -1 or greater."
msgstr ""
+#: nova/api/openstack/compute/contrib/quotas.py:96
+#, python-format
+msgid "Quota for %s should be integer."
+msgstr ""
+
#: nova/api/openstack/compute/contrib/scheduler_hints.py:41
msgid "Malformed scheduler_hints attribute"
msgstr ""
@@ -3111,6 +3129,10 @@ msgstr ""
msgid "stop instance"
msgstr ""
+#: nova/api/openstack/compute/contrib/simple_tenant_usage.py:210
+msgid "Invalid start time. The start time cannot occur after the end time."
+msgstr ""
+
#: nova/api/openstack/compute/contrib/volumes.py:76
#, python-format
msgid "vol=%s"
@@ -3426,94 +3448,94 @@ msgstr ""
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2367
+#: nova/compute/api.py:2369
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2386
+#: nova/compute/api.py:2388
msgid "vm evacuation scheduled"
msgstr ""
-#: nova/compute/api.py:2390
+#: nova/compute/api.py:2392
#, python-format
msgid ""
"Instance compute service state on %(host)s expected to be down, but it "
"was up."
msgstr ""
-#: nova/compute/api.py:2607
+#: nova/compute/api.py:2612
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2611
+#: nova/compute/api.py:2616
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2712
+#: nova/compute/api.py:2717
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2715
+#: nova/compute/api.py:2720
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2723
+#: nova/compute/api.py:2728
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2729
+#: nova/compute/api.py:2734
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2749
+#: nova/compute/api.py:2754
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2752
+#: nova/compute/api.py:2757
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2759
+#: nova/compute/api.py:2764
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2824
+#: nova/compute/api.py:2829
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2832
+#: nova/compute/api.py:2837
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2835
+#: nova/compute/api.py:2840
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:3093
+#: nova/compute/api.py:3098
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:3102
+#: nova/compute/api.py:3107
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:3105
+#: nova/compute/api.py:3110
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:3116
+#: nova/compute/api.py:3121
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
@@ -3572,34 +3594,34 @@ msgid ""
"%(requested)d %(unit)s"
msgstr ""
-#: nova/compute/instance_types.py:92
+#: nova/compute/instance_types.py:95
msgid "names can only contain [a-zA-Z0-9_.- ]"
msgstr ""
-#: nova/compute/instance_types.py:101
+#: nova/compute/instance_types.py:104
#, python-format
msgid "'%s' argument must be a positive integer"
msgstr ""
-#: nova/compute/instance_types.py:109
+#: nova/compute/instance_types.py:112
msgid "'rxtx_factor' argument must be a positive float"
msgstr ""
-#: nova/compute/instance_types.py:117
+#: nova/compute/instance_types.py:120
#, python-format
msgid "'%s' argument must be greater than 0"
msgstr ""
-#: nova/compute/instance_types.py:127
+#: nova/compute/instance_types.py:130
msgid "is_public must be a boolean"
msgstr ""
-#: nova/compute/instance_types.py:134
+#: nova/compute/instance_types.py:137
#, python-format
msgid "DB error: %s"
msgstr ""
-#: nova/compute/instance_types.py:144
+#: nova/compute/instance_types.py:147
#, python-format
msgid "Instance type %s not found for deletion"
msgstr ""
@@ -3666,495 +3688,491 @@ msgstr ""
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:682 nova/compute/manager.py:1997
+#: nova/compute/manager.py:683 nova/compute/manager.py:2016
#, python-format
msgid "No node specified, defaulting to %(node)s"
msgstr ""
-#: nova/compute/manager.py:725
+#: nova/compute/manager.py:726
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:750
+#: nova/compute/manager.py:751
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:785 nova/compute/manager.py:2051
+#: nova/compute/manager.py:786 nova/compute/manager.py:2070
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:803
+#: nova/compute/manager.py:804
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:808
+#: nova/compute/manager.py:809
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:814
+#: nova/compute/manager.py:815
#, python-format
msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:842
+#: nova/compute/manager.py:843
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:918
+#: nova/compute/manager.py:919
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:924
+#: nova/compute/manager.py:925
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:934
+#: nova/compute/manager.py:935
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:956
+#: nova/compute/manager.py:957
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:960
+#: nova/compute/manager.py:961
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:970
+#: nova/compute/manager.py:971
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:987
+#: nova/compute/manager.py:988
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:1011
+#: nova/compute/manager.py:1012
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:1087
+#: nova/compute/manager.py:1088
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:1118
+#: nova/compute/manager.py:1119
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1121
+#: nova/compute/manager.py:1122
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1128
+#: nova/compute/manager.py:1129
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:1153
+#: nova/compute/manager.py:1154
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:1192 nova/compute/manager.py:2232
-#: nova/compute/manager.py:3618
+#: nova/compute/manager.py:1193 nova/compute/manager.py:2245
+#: nova/compute/manager.py:3635
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1332
+#: nova/compute/manager.py:1333
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1345
+#: nova/compute/manager.py:1346
msgid "Invalid state of instance files on shared storage"
msgstr ""
-#: nova/compute/manager.py:1349
+#: nova/compute/manager.py:1350
msgid "disk on shared storage, recreating using existing disk"
msgstr ""
-#: nova/compute/manager.py:1353
+#: nova/compute/manager.py:1354
#, python-format
msgid "disk not on shared storagerebuilding from: '%s'"
msgstr ""
-#: nova/compute/manager.py:1437
+#: nova/compute/manager.py:1438
#, python-format
msgid "bringing vm to original state: '%s'"
msgstr ""
-#: nova/compute/manager.py:1461
+#: nova/compute/manager.py:1462
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1480
+#: nova/compute/manager.py:1481
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1490
+#: nova/compute/manager.py:1491
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1526
+#: nova/compute/manager.py:1527
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1532
+#: nova/compute/manager.py:1533
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1593
+#: nova/compute/manager.py:1594
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1600
+#: nova/compute/manager.py:1601
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1605
+#: nova/compute/manager.py:1606
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1633
+#: nova/compute/manager.py:1634
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1640
+#: nova/compute/manager.py:1641
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1647
+#: nova/compute/manager.py:1648
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
-#: nova/compute/manager.py:1662
+#: nova/compute/manager.py:1663
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1669
+#: nova/compute/manager.py:1670
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1682
+#: nova/compute/manager.py:1683
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1686
+#: nova/compute/manager.py:1687
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1706
+#: nova/compute/manager.py:1707
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:1720
+#: nova/compute/manager.py:1721
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1755
+#: nova/compute/manager.py:1756
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1776
+#: nova/compute/manager.py:1777
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1955
+#: nova/compute/manager.py:1974
msgid "Instance has no source host"
msgstr ""
-#: nova/compute/manager.py:1961
+#: nova/compute/manager.py:1980
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1978
+#: nova/compute/manager.py:1997
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:2229
+#: nova/compute/manager.py:2242
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:2284
+#: nova/compute/manager.py:2297
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:2302
+#: nova/compute/manager.py:2315
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2340
+#: nova/compute/manager.py:2353
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2371
+#: nova/compute/manager.py:2384
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2391
+#: nova/compute/manager.py:2404
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2396
+#: nova/compute/manager.py:2409
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2399
+#: nova/compute/manager.py:2412
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2416
+#: nova/compute/manager.py:2429
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2441
+#: nova/compute/manager.py:2454
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2469
+#: nova/compute/manager.py:2482
msgid "Getting spice console"
msgstr ""
-#: nova/compute/manager.py:2499
+#: nova/compute/manager.py:2512
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2550
+#: nova/compute/manager.py:2563
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2559
+#: nova/compute/manager.py:2572
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2574
+#: nova/compute/manager.py:2587
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2604
+#: nova/compute/manager.py:2617
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2614
+#: nova/compute/manager.py:2627
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2621
+#: nova/compute/manager.py:2634
#, python-format
msgid "Failed to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2645
+#: nova/compute/manager.py:2658
msgid "Updating volume usage cache with totals"
msgstr ""
-#: nova/compute/manager.py:2682
+#: nova/compute/manager.py:2695
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2755
-msgid "Instance has no volume."
-msgstr ""
-
-#: nova/compute/manager.py:2816
+#: nova/compute/manager.py:2824
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2844
+#: nova/compute/manager.py:2852
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2899
+#: nova/compute/manager.py:2907
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2901
+#: nova/compute/manager.py:2909
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2915
+#: nova/compute/manager.py:2923
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:3055
+#: nova/compute/manager.py:3063
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:3100
+#: nova/compute/manager.py:3108
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:3106
+#: nova/compute/manager.py:3114
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:3115
+#: nova/compute/manager.py:3123
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:3122
+#: nova/compute/manager.py:3130
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:3126
+#: nova/compute/manager.py:3134
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:3133
+#: nova/compute/manager.py:3141
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr ""
-#: nova/compute/manager.py:3141
+#: nova/compute/manager.py:3149
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:3158
+#: nova/compute/manager.py:3166
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:3177
+#: nova/compute/manager.py:3185
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:3201
+#: nova/compute/manager.py:3209
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:3319
+#: nova/compute/manager.py:3327
msgid "Updating volume usage cache"
msgstr ""
-#: nova/compute/manager.py:3337
+#: nova/compute/manager.py:3345
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:3366
+#: nova/compute/manager.py:3371
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:3372 nova/compute/manager.py:3410
+#: nova/compute/manager.py:3376 nova/compute/manager.py:3425
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:3397
+#: nova/compute/manager.py:3412
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:3433
+#: nova/compute/manager.py:3450
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3445 nova/compute/manager.py:3454
-#: nova/compute/manager.py:3484
+#: nova/compute/manager.py:3462 nova/compute/manager.py:3471
+#: nova/compute/manager.py:3501
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:3449
+#: nova/compute/manager.py:3466
msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3465
+#: nova/compute/manager.py:3482
msgid "Instance is paused unexpectedly. Ignore."
msgstr ""
-#: nova/compute/manager.py:3471
+#: nova/compute/manager.py:3488
msgid "Instance is unexpectedly not found. Ignore."
msgstr ""
-#: nova/compute/manager.py:3477
+#: nova/compute/manager.py:3494
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3493
+#: nova/compute/manager.py:3510
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3501
+#: nova/compute/manager.py:3518
msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3516
+#: nova/compute/manager.py:3533
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3571
+#: nova/compute/manager.py:3588
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3578
+#: nova/compute/manager.py:3595
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3585
+#: nova/compute/manager.py:3602
#, python-format
msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
@@ -4273,18 +4291,18 @@ msgstr ""
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/conductor/api.py:363
+#: nova/conductor/api.py:366
msgid ""
"Timed out waiting for nova-conductor. Is it running? Or did this service "
"start before nova-conductor?"
msgstr ""
-#: nova/conductor/manager.py:80
+#: nova/conductor/manager.py:87
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
-#: nova/conductor/manager.py:245
+#: nova/conductor/manager.py:252
msgid "Invalid block_device_mapping_destroy invocation"
msgstr ""
@@ -4378,23 +4396,23 @@ msgstr ""
msgid "Failed to notify cells of instance fault"
msgstr ""
-#: nova/db/sqlalchemy/api.py:194
+#: nova/db/sqlalchemy/api.py:202
msgid "model or base_model parameter should be subclass of NovaBase"
msgstr ""
-#: nova/db/sqlalchemy/api.py:207 nova/virt/baremetal/db/sqlalchemy/api.py:60
+#: nova/db/sqlalchemy/api.py:215 nova/virt/baremetal/db/sqlalchemy/api.py:60
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1389
+#: nova/db/sqlalchemy/api.py:1397
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:2738
+#: nova/db/sqlalchemy/api.py:2747
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
@@ -4428,6 +4446,7 @@ msgid "Exception while seeding instance_types table"
msgstr ""
#: nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py:927
+#: nova/db/sqlalchemy/migrate_repo/versions/154_add_shadow_tables.py:58
msgid "Exception while creating table."
msgstr ""
@@ -4439,6 +4458,11 @@ msgstr ""
msgid "volume_usage_cache table not dropped"
msgstr ""
+#: nova/db/sqlalchemy/migrate_repo/versions/154_add_shadow_tables.py:77
+#, python-format
+msgid "table '%s' not dropped"
+msgstr ""
+
#: nova/image/glance.py:187
#, python-format
msgid ""
@@ -4527,69 +4551,69 @@ msgstr ""
msgid "Loading network driver '%s'"
msgstr ""
-#: nova/network/floating_ips.py:85
+#: nova/network/floating_ips.py:86
#, python-format
msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/floating_ips.py:95 nova/network/floating_ips.py:368
+#: nova/network/floating_ips.py:96 nova/network/floating_ips.py:372
#, python-format
msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/floating_ips.py:118
+#: nova/network/floating_ips.py:120
#, python-format
msgid "floating IP allocation for instance |%(floating_address)s|"
msgstr ""
-#: nova/network/floating_ips.py:173
+#: nova/network/floating_ips.py:177
msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/floating_ips.py:191
+#: nova/network/floating_ips.py:195
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/floating_ips.py:195
+#: nova/network/floating_ips.py:199
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/floating_ips.py:215
+#: nova/network/floating_ips.py:219
#, python-format
msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/floating_ips.py:276
+#: nova/network/floating_ips.py:280
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/floating_ips.py:525
+#: nova/network/floating_ips.py:529
#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/floating_ips.py:532
+#: nova/network/floating_ips.py:536
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/floating_ips.py:563
+#: nova/network/floating_ips.py:567
#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/floating_ips.py:571
+#: nova/network/floating_ips.py:575
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/floating_ips.py:619
+#: nova/network/floating_ips.py:623
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -4597,12 +4621,12 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/floating_ips.py:659
+#: nova/network/floating_ips.py:663
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/floating_ips.py:668
+#: nova/network/floating_ips.py:672
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
@@ -4623,122 +4647,122 @@ msgstr ""
msgid "This driver only supports type 'a' entries."
msgstr ""
-#: nova/network/ldapdns.py:364 nova/network/minidns.py:171
+#: nova/network/ldapdns.py:364
msgid "This shouldn't be getting called except during testing."
msgstr ""
-#: nova/network/linux_net.py:194
+#: nova/network/linux_net.py:208
#, python-format
msgid "Attempted to remove chain %s which does not exist"
msgstr ""
-#: nova/network/linux_net.py:229
+#: nova/network/linux_net.py:243
#, python-format
msgid "Unknown chain: %r"
msgstr ""
-#: nova/network/linux_net.py:254
+#: nova/network/linux_net.py:268
#, python-format
msgid ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
"%(top)r"
msgstr ""
-#: nova/network/linux_net.py:391
+#: nova/network/linux_net.py:405
msgid "IPTablesManager.apply completed with success"
msgstr ""
-#: nova/network/linux_net.py:619
+#: nova/network/linux_net.py:654
#, python-format
msgid "arping error for ip %s"
msgstr ""
-#: nova/network/linux_net.py:896
+#: nova/network/linux_net.py:934
#, python-format
msgid "Pid %d is stale, skip killing dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:941
+#: nova/network/linux_net.py:979
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr ""
-#: nova/network/linux_net.py:943
+#: nova/network/linux_net.py:981
#, python-format
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:1008
+#: nova/network/linux_net.py:1056
#, python-format
msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/linux_net.py:1010
+#: nova/network/linux_net.py:1058
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#: nova/network/linux_net.py:1142
+#: nova/network/linux_net.py:1190
#, python-format
msgid "Error clearing stale veth %s"
msgstr ""
-#: nova/network/linux_net.py:1301
+#: nova/network/linux_net.py:1349
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
-#: nova/network/linux_net.py:1332
+#: nova/network/linux_net.py:1380
#, python-format
msgid "Failed unplugging VLAN interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1335
+#: nova/network/linux_net.py:1383
#, python-format
msgid "Unplugged VLAN interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1357
+#: nova/network/linux_net.py:1405
#, python-format
msgid "Starting Bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1369
+#: nova/network/linux_net.py:1417
#, python-format
msgid "Adding interface %(interface)s to bridge %(bridge)s"
msgstr ""
-#: nova/network/linux_net.py:1402
+#: nova/network/linux_net.py:1450
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/linux_net.py:1438
+#: nova/network/linux_net.py:1486
#, python-format
msgid "Failed unplugging bridge interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1441
+#: nova/network/linux_net.py:1489
#, python-format
msgid "Unplugged bridge interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1610
+#: nova/network/linux_net.py:1658
#, python-format
msgid "Starting bridge %s "
msgstr ""
-#: nova/network/linux_net.py:1618
+#: nova/network/linux_net.py:1666
#, python-format
msgid "Done starting bridge %s"
msgstr ""
-#: nova/network/linux_net.py:1637
+#: nova/network/linux_net.py:1685
#, python-format
msgid "Failed unplugging gateway interface '%s'"
msgstr ""
-#: nova/network/linux_net.py:1639
+#: nova/network/linux_net.py:1687
#, python-format
msgid "Unplugged gateway interface '%s'"
msgstr ""
@@ -4752,20 +4776,20 @@ msgstr ""
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:484
+#: nova/network/manager.py:485
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:491
+#: nova/network/manager.py:492
#, python-format
msgid "networks retrieved for instance: |%(networks_list)s|"
msgstr ""
-#: nova/network/manager.py:536
+#: nova/network/manager.py:540
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:782
+#: nova/network/manager.py:803
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4773,89 +4797,89 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:870
+#: nova/network/manager.py:895
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:889
+#: nova/network/manager.py:914
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:893
+#: nova/network/manager.py:918
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:901
+#: nova/network/manager.py:927
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:906
+#: nova/network/manager.py:932
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:910
+#: nova/network/manager.py:936
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:913
+#: nova/network/manager.py:940
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:932
+#: nova/network/manager.py:959
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:956
+#: nova/network/manager.py:983
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:976
+#: nova/network/manager.py:1003
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1057
+#: nova/network/manager.py:1084
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1060
+#: nova/network/manager.py:1087
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1071
+#: nova/network/manager.py:1098
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1128
+#: nova/network/manager.py:1155
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1147
+#: nova/network/manager.py:1174
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:1686
+#: nova/network/manager.py:1716
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:1693
+#: nova/network/manager.py:1723
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s networks. "
@@ -4880,7 +4904,7 @@ msgstr ""
msgid "Cannot delete entry |%s|"
msgstr ""
-#: nova/network/minidns.py:208
+#: nova/network/minidns.py:207
#, python-format
msgid "Cannot delete domain |%s|"
msgstr ""
@@ -4949,6 +4973,11 @@ msgstr ""
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
+#: nova/openstack/common/excutils.py:48
+#, python-format
+msgid "Original exception being dropped: %s"
+msgstr ""
+
#: nova/openstack/common/jsonutils.py:88
#, python-format
msgid "Max serialization depth exceeded on object: %d %s"
@@ -5062,7 +5091,7 @@ msgstr ""
msgid "Could not send notification to %(topic)s. Payload=%(message)s"
msgstr ""
-#: nova/openstack/common/plugin/pluginmanager.py:64
+#: nova/openstack/common/plugin/pluginmanager.py:65
#, python-format
msgid "Failed to load plugin %(plug)s: %(exc)s"
msgstr ""
@@ -5439,7 +5468,7 @@ msgstr ""
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:60 nova/scheduler/manager.py:192
+#: nova/scheduler/driver.py:60 nova/scheduler/manager.py:203
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
@@ -5452,45 +5481,45 @@ msgstr ""
msgid "Driver must implement schedule_run_instance"
msgstr ""
-#: nova/scheduler/driver.py:284
+#: nova/scheduler/driver.py:287
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
"memory(host:%(avail)s <= instance:%(mem_inst)s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:56
+#: nova/scheduler/filter_scheduler.py:73
#, python-format
msgid "Attempting to build %(num_instances)d instance(s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:201
+#: nova/scheduler/filter_scheduler.py:218
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:218
+#: nova/scheduler/filter_scheduler.py:235
#, python-format
msgid "Error from last host: %(last_host)s (node %(last_node)s): %(exc)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:247
+#: nova/scheduler/filter_scheduler.py:264
#, python-format
msgid ""
"Exceeded max scheduling attempts %(max_attempts)d for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:313
+#: nova/scheduler/filter_scheduler.py:330
#, python-format
msgid "Filtered %(hosts)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:318
+#: nova/scheduler/filter_scheduler.py:343
#, python-format
-msgid "Choosing host %(best_host)s"
+msgid "Choosing host %(chosen_host)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:351
+#: nova/scheduler/filter_scheduler.py:376
#, python-format
msgid "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory"
msgstr ""
@@ -5522,13 +5551,18 @@ msgstr ""
msgid "Received %(service_name)s service update from %(state_key)s."
msgstr ""
-#: nova/scheduler/host_manager.py:375
+#: nova/scheduler/host_manager.py:376
#: nova/scheduler/filters/trusted_filter.py:220
#, python-format
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/manager.py:178
+#: nova/scheduler/host_manager.py:398
+#, python-format
+msgid "Removing dead compute node %(host)s:%(node)s from scheduler"
+msgstr ""
+
+#: nova/scheduler/manager.py:189
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
@@ -5638,41 +5672,41 @@ msgstr ""
msgid "least_cost has been deprecated in favor of the RAM Weigher."
msgstr ""
-#: nova/servicegroup/api.py:60
+#: nova/servicegroup/api.py:58
#, python-format
msgid "ServiceGroup driver defined as an instance of %s"
msgstr ""
-#: nova/servicegroup/api.py:66
+#: nova/servicegroup/api.py:64
#, python-format
msgid "unknown ServiceGroup driver name: %s"
msgstr ""
-#: nova/servicegroup/api.py:83
+#: nova/servicegroup/api.py:81
#, python-format
msgid ""
"Join new ServiceGroup member %(member_id)s to the %(group_id)s group, "
"service = %(service)s"
msgstr ""
-#: nova/servicegroup/api.py:90
+#: nova/servicegroup/api.py:88
#, python-format
msgid "Check if the given member [%s] is part of the ServiceGroup, is up"
msgstr ""
-#: nova/servicegroup/api.py:99
+#: nova/servicegroup/api.py:97
#, python-format
msgid ""
"Explicitly remove the given member %(member_id)s from the%(group_id)s "
"group monitoring"
msgstr ""
-#: nova/servicegroup/api.py:106
+#: nova/servicegroup/api.py:104
#, python-format
msgid "Returns ALL members of the [%s] ServiceGroup"
msgstr ""
-#: nova/servicegroup/api.py:114
+#: nova/servicegroup/api.py:112
#, python-format
msgid "Returns one member of the [%s] group"
msgstr ""
@@ -5749,15 +5783,19 @@ msgstr ""
msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
msgstr ""
-#: nova/tests/fake_volume.py:180 nova/volume/cinder.py:201
+#: nova/tests/fake_volume.py:185 nova/volume/cinder.py:205
msgid "status must be available"
msgstr ""
-#: nova/tests/fake_volume.py:184 nova/volume/cinder.py:204
+#: nova/tests/fake_volume.py:189 nova/volume/cinder.py:208
msgid "already attached"
msgstr ""
-#: nova/tests/fake_volume.py:189 nova/volume/cinder.py:210
+#: nova/tests/fake_volume.py:193 nova/volume/cinder.py:212
+msgid "Instance and volume not in same availability_zone"
+msgstr ""
+
+#: nova/tests/fake_volume.py:198 nova/volume/cinder.py:218
msgid "already detached"
msgstr ""
@@ -5915,7 +5953,7 @@ msgstr ""
#: nova/tests/compute/test_compute.py:734
#: nova/tests/compute/test_compute.py:785
#: nova/tests/compute/test_compute.py:812
-#: nova/tests/compute/test_compute.py:2768
+#: nova/tests/compute/test_compute.py:2909
#, python-format
msgid "Running instances: %s"
msgstr ""
@@ -5932,21 +5970,21 @@ msgstr ""
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2779
+#: nova/tests/compute/test_compute.py:2920
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:3277
+#: nova/tests/compute/test_compute.py:3418
msgid "wrong host/node"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:166
+#: nova/tests/integrated/test_api_samples.py:171
#, python-format
msgid "Result: %(result)s is not a dict."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:170
+#: nova/tests/integrated/test_api_samples.py:175
#, python-format
msgid ""
"Key mismatch:\n"
@@ -5954,21 +5992,21 @@ msgid ""
"%(res_keys)s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:178
+#: nova/tests/integrated/test_api_samples.py:183
#, python-format
msgid "Result: %(result)s is not a list."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:196
+#: nova/tests/integrated/test_api_samples.py:201
msgid "Extra items in expected:"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:200
+#: nova/tests/integrated/test_api_samples.py:205
msgid "Extra items in result:"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:219
-#: nova/tests/integrated/test_api_samples.py:232
+#: nova/tests/integrated/test_api_samples.py:224
+#: nova/tests/integrated/test_api_samples.py:237
#, python-format
msgid ""
"Values do not match:\n"
@@ -6188,7 +6226,7 @@ msgstr ""
msgid "Fetching image %(ami)s for instance %(name)s"
msgstr ""
-#: nova/virt/baremetal/pxe.py:318
+#: nova/virt/baremetal/pxe.py:321
#, python-format
msgid "Injecting files into image for instance %(name)s"
msgstr ""
@@ -6735,7 +6773,7 @@ msgstr ""
msgid "get_available_resource called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:134 nova/virt/libvirt/driver.py:3324
+#: nova/virt/hyperv/hostops.py:134 nova/virt/libvirt/driver.py:3325
#: nova/virt/xenapi/host.py:149
msgid "Updating host stats"
msgstr ""
@@ -7023,12 +7061,12 @@ msgstr ""
msgid "Using config drive for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:202 nova/virt/libvirt/driver.py:1530
+#: nova/virt/hyperv/vmops.py:202 nova/virt/libvirt/driver.py:1532
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:210 nova/virt/libvirt/driver.py:1536
+#: nova/virt/hyperv/vmops.py:210 nova/virt/libvirt/driver.py:1538
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
@@ -7167,7 +7205,7 @@ msgstr ""
msgid "Detaching physical disk from instance: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:156 nova/virt/libvirt/driver.py:645
+#: nova/virt/hyperv/volumeops.py:156 nova/virt/libvirt/driver.py:647
msgid "Could not determine iscsi initiator name"
msgstr ""
@@ -7196,204 +7234,194 @@ msgstr ""
msgid "No free disk device names for prefix '%s'"
msgstr ""
-#: nova/virt/libvirt/blockinfo.py:179
-#, python-format
-msgid "Unsupported virt type %s"
-msgstr ""
-
-#: nova/virt/libvirt/blockinfo.py:204
-#, python-format
-msgid "Disk bus %(disk_bus)s is not valid for %(virt)s"
-msgstr ""
-
-#: nova/virt/libvirt/blockinfo.py:254
+#: nova/virt/libvirt/blockinfo.py:252
#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:352
+#: nova/virt/libvirt/driver.py:354
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:358
+#: nova/virt/libvirt/driver.py:360
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:379
+#: nova/virt/libvirt/driver.py:381
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:401 nova/virt/libvirt/driver.py:404
+#: nova/virt/libvirt/driver.py:403 nova/virt/libvirt/driver.py:406
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:422
+#: nova/virt/libvirt/driver.py:424
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:510
+#: nova/virt/libvirt/driver.py:512
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:524
+#: nova/virt/libvirt/driver.py:526
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:529
+#: nova/virt/libvirt/driver.py:531
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:552
+#: nova/virt/libvirt/driver.py:554
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:567
+#: nova/virt/libvirt/driver.py:569
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:581
+#: nova/virt/libvirt/driver.py:583
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:599
+#: nova/virt/libvirt/driver.py:601
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:608
+#: nova/virt/libvirt/driver.py:610
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:760
+#: nova/virt/libvirt/driver.py:762
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:853
+#: nova/virt/libvirt/driver.py:855
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:856
+#: nova/virt/libvirt/driver.py:858
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:885
+#: nova/virt/libvirt/driver.py:887
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:897
+#: nova/virt/libvirt/driver.py:899
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:970
+#: nova/virt/libvirt/driver.py:972
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:974
+#: nova/virt/libvirt/driver.py:976
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:1009
+#: nova/virt/libvirt/driver.py:1011
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1017
+#: nova/virt/libvirt/driver.py:1019
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:1059
+#: nova/virt/libvirt/driver.py:1061
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1202
+#: nova/virt/libvirt/driver.py:1204
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1209 nova/virt/powervm/operator.py:272
+#: nova/virt/libvirt/driver.py:1211 nova/virt/powervm/operator.py:289
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1225
+#: nova/virt/libvirt/driver.py:1227
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1262 nova/virt/libvirt/driver.py:1288
+#: nova/virt/libvirt/driver.py:1264 nova/virt/libvirt/driver.py:1290
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:1277
+#: nova/virt/libvirt/driver.py:1279
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1346
+#: nova/virt/libvirt/driver.py:1348
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1350
+#: nova/virt/libvirt/driver.py:1352
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1354 nova/virt/libvirt/driver.py:1358
+#: nova/virt/libvirt/driver.py:1356 nova/virt/libvirt/driver.py:1360
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1424
+#: nova/virt/libvirt/driver.py:1426
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1523
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:1569
+#: nova/virt/libvirt/driver.py:1571
#, python-format
msgid "Injecting %(inj)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1578
+#: nova/virt/libvirt/driver.py:1580
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1636
+#: nova/virt/libvirt/driver.py:1638
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1642
+#: nova/virt/libvirt/driver.py:1644
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1646
+#: nova/virt/libvirt/driver.py:1648
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1650
+#: nova/virt/libvirt/driver.py:1652
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1666
+#: nova/virt/libvirt/driver.py:1668
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1966
+#: nova/virt/libvirt/driver.py:1969
#, python-format
msgid ""
"Start to_xml instance=%(instance)s network_info=%(network_info)s "
@@ -7401,80 +7429,80 @@ msgid ""
"rescue=%(rescue)sblock_device_info=%(block_device_info)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1982
+#: nova/virt/libvirt/driver.py:1985
#, python-format
msgid "End to_xml instance=%(instance)s xml=%(xml)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1999
+#: nova/virt/libvirt/driver.py:2002
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2166
+#: nova/virt/libvirt/driver.py:2169
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:2217
+#: nova/virt/libvirt/driver.py:2220
#, python-format
msgid "List of domains returned by libVirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2219
+#: nova/virt/libvirt/driver.py:2222
#, python-format
msgid "libVirt can't find a domain with id: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2301
+#: nova/virt/libvirt/driver.py:2304
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2384
+#: nova/virt/libvirt/driver.py:2387
#, python-format
msgid "Trying to get stats for the volume %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2408
+#: nova/virt/libvirt/driver.py:2411
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2412
+#: nova/virt/libvirt/driver.py:2415
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:2528
+#: nova/virt/libvirt/driver.py:2531
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2536
+#: nova/virt/libvirt/driver.py:2539
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2573
+#: nova/virt/libvirt/driver.py:2576
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2598
+#: nova/virt/libvirt/driver.py:2601
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2610
+#: nova/virt/libvirt/driver.py:2613
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -7484,62 +7512,62 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2627
+#: nova/virt/libvirt/driver.py:2630
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2675
+#: nova/virt/libvirt/driver.py:2678
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2747
+#: nova/virt/libvirt/driver.py:2750
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2840
+#: nova/virt/libvirt/driver.py:2843
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2950
+#: nova/virt/libvirt/driver.py:2953
#, python-format
msgid ""
"Error from libvirt while getting description of %(instance_name)s: [Error"
" Code %(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2967
+#: nova/virt/libvirt/driver.py:2970
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:3016
+#: nova/virt/libvirt/driver.py:3019
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3064
+#: nova/virt/libvirt/driver.py:3067
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:3123
+#: nova/virt/libvirt/driver.py:3126
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:3129
+#: nova/virt/libvirt/driver.py:3132
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:3185
+#: nova/virt/libvirt/driver.py:3188
msgid "Starting finish_revert_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:3298
+#: nova/virt/libvirt/driver.py:3301
#, python-format
msgid "Checking instance files accessability%(instance_path)s"
msgstr ""
@@ -7572,11 +7600,11 @@ msgstr ""
msgid "iptables firewall: Setup Basic Filtering"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:232
+#: nova/virt/libvirt/imagebackend.py:236
msgid "You should specify libvirt_images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:292
+#: nova/virt/libvirt/imagebackend.py:315
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
@@ -7751,72 +7779,72 @@ msgstr ""
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/vif.py:231 nova/virt/libvirt/vif.py:342
-#: nova/virt/libvirt/vif.py:429
+#: nova/virt/libvirt/vif.py:276 nova/virt/libvirt/vif.py:395
+#: nova/virt/libvirt/vif.py:482
#, python-format
msgid ""
"vif_type=%(vif_type)s instance=%(instance)s network=%(network)s "
"mapping=%(mapping)s"
msgstr ""
-#: nova/virt/libvirt/vif.py:237 nova/virt/libvirt/vif.py:348
-#: nova/virt/libvirt/vif.py:435
+#: nova/virt/libvirt/vif.py:282 nova/virt/libvirt/vif.py:401
+#: nova/virt/libvirt/vif.py:488
msgid "vif_type parameter must be present for this vif_driver implementation"
msgstr ""
-#: nova/virt/libvirt/vif.py:250 nova/virt/libvirt/vif.py:361
-#: nova/virt/libvirt/vif.py:448
+#: nova/virt/libvirt/vif.py:303 nova/virt/libvirt/vif.py:414
+#: nova/virt/libvirt/vif.py:501
#, python-format
msgid "Unexpected vif_type=%s"
msgstr ""
-#: nova/virt/libvirt/vif.py:262
+#: nova/virt/libvirt/vif.py:315
#, python-format
msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
msgstr ""
-#: nova/virt/libvirt/vif.py:272
+#: nova/virt/libvirt/vif.py:325
#, python-format
msgid "Ensuring bridge %s"
msgstr ""
-#: nova/virt/libvirt/vif.py:378 nova/virt/libvirt/vif.py:407
+#: nova/virt/libvirt/vif.py:431 nova/virt/libvirt/vif.py:460
msgid "Failed while unplugging vif"
msgstr ""
-#: nova/virt/libvirt/volume.py:224
+#: nova/virt/libvirt/volume.py:228
#, python-format
msgid "iSCSI device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:227
+#: nova/virt/libvirt/volume.py:231
#, python-format
msgid ""
"ISCSI volume not yet found at: %(disk_dev)s. Will rescan & retry. Try "
"number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:239
+#: nova/virt/libvirt/volume.py:243
#, python-format
msgid "Found iSCSI node %(disk_dev)s (after %(tries)s rescans)"
msgstr ""
-#: nova/virt/libvirt/volume.py:312
+#: nova/virt/libvirt/volume.py:316 nova/virt/libvirt/volume.py:437
#, python-format
msgid "%s is already mounted"
msgstr ""
-#: nova/virt/libvirt/volume.py:368
+#: nova/virt/libvirt/volume.py:372
#, python-format
msgid "AoE device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:370
+#: nova/virt/libvirt/volume.py:374
#, python-format
msgid "AoE volume not yet found at: %(aoedevpath)s. Try number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:384
+#: nova/virt/libvirt/volume.py:388
#, python-format
msgid "Found AoE device %(aoedevpath)s (after %(tries)s rediscover)"
msgstr ""
@@ -7876,54 +7904,73 @@ msgstr ""
msgid "Failed to clean up snapshot file %(snapshot_file_path)s"
msgstr ""
-#: nova/virt/powervm/blockdev.py:210
+#: nova/virt/powervm/blockdev.py:249
msgid "Could not create logical volume. No space left on any volume group."
msgstr ""
-#: nova/virt/powervm/blockdev.py:298 nova/virt/powervm/blockdev.py:370
+#: nova/virt/powervm/blockdev.py:337 nova/virt/powervm/blockdev.py:409
msgid "Unable to get checksum"
msgstr ""
-#: nova/virt/powervm/blockdev.py:301 nova/virt/powervm/blockdev.py:393
+#: nova/virt/powervm/blockdev.py:340 nova/virt/powervm/blockdev.py:432
msgid "Image checksums do not match"
msgstr ""
-#: nova/virt/powervm/blockdev.py:322
+#: nova/virt/powervm/blockdev.py:361
#, python-format
msgid "Image found on host at '%s'"
msgstr ""
-#: nova/virt/powervm/blockdev.py:330
+#: nova/virt/powervm/blockdev.py:369
msgid "Uncompressed image file not found"
msgstr ""
-#: nova/virt/powervm/common.py:54
+#: nova/virt/powervm/common.py:57
msgid "Connection error connecting PowerVM manager"
msgstr ""
-#: nova/virt/powervm/common.py:66
+#: nova/virt/powervm/common.py:69
#, python-format
msgid "Running cmd (SSH-as-root): %s"
msgstr ""
-#: nova/virt/powervm/common.py:112
+#: nova/virt/powervm/common.py:115
msgid "File transfer to PowerVM manager failed"
msgstr ""
-#: nova/virt/powervm/common.py:131
+#: nova/virt/powervm/common.py:134
#, python-format
msgid "ftp GET %(remote_path)s to: %(local_path)s"
msgstr ""
-#: nova/virt/powervm/common.py:137
+#: nova/virt/powervm/common.py:140
msgid "File transfer from PowerVM manager failed"
msgstr ""
-#: nova/virt/powervm/driver.py:160
+#: nova/virt/powervm/driver.py:118
+msgid "In get_host_ip_addr"
+msgstr ""
+
+#: nova/virt/powervm/driver.py:121
+#, python-format
+msgid "Attempting to resolve %s"
+msgstr ""
+
+#: nova/virt/powervm/driver.py:123
+#, python-format
+msgid "%(hostname)s was successfully resolved to %(ip_addr)s"
+msgstr ""
+
+#: nova/virt/powervm/driver.py:166
#, python-format
msgid "%(inst_name)s captured in %(snapshot_time)s seconds"
msgstr ""
+#: nova/virt/powervm/driver.py:286
+#, python-format
+msgid "Unrecognized root disk information: %s"
+msgstr ""
+
#: nova/virt/powervm/exception.py:21
msgid "Connection to PowerVM manager failed"
msgstr ""
@@ -7980,74 +8027,83 @@ msgstr ""
msgid "PowerVM LPAR instance '%(instance_name)s' cleanup failed"
msgstr ""
+#: nova/virt/powervm/lpar.py:55
+#, python-format
+msgid ""
+"Encountered unknown LPAR attribute: %s\n"
+"Continuing without storing"
+msgstr ""
+
#: nova/virt/powervm/operator.py:101
#, python-format
msgid "LPAR instance '%s' not found"
msgstr ""
-#: nova/virt/powervm/operator.py:185
+#: nova/virt/powervm/operator.py:185 nova/virt/powervm/operator.py:394
+#: nova/virt/powervm/operator.py:436
msgid "Not enough free memory in the host"
msgstr ""
-#: nova/virt/powervm/operator.py:195
+#: nova/virt/powervm/operator.py:195 nova/virt/powervm/operator.py:405
+#: nova/virt/powervm/operator.py:442
msgid "Insufficient available CPU on PowerVM"
msgstr ""
-#: nova/virt/powervm/operator.py:233
+#: nova/virt/powervm/operator.py:241 nova/virt/powervm/operator.py:273
#, python-format
msgid "Creating LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:238
+#: nova/virt/powervm/operator.py:246 nova/virt/powervm/operator.py:275
#, python-format
msgid "LPAR instance '%s' creation failed"
msgstr ""
-#: nova/virt/powervm/operator.py:255
+#: nova/virt/powervm/operator.py:263
#, python-format
msgid "PowerVM image creation failed: %s"
msgstr ""
-#: nova/virt/powervm/operator.py:263
+#: nova/virt/powervm/operator.py:280
#, python-format
msgid "Activating the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:277
+#: nova/virt/powervm/operator.py:294
#, python-format
msgid "Instance '%s' failed to boot"
msgstr ""
-#: nova/virt/powervm/operator.py:289
+#: nova/virt/powervm/operator.py:306
msgid "Error while attempting to clean up failed instance launch."
msgstr ""
-#: nova/virt/powervm/operator.py:293
+#: nova/virt/powervm/operator.py:310
#, python-format
msgid "Instance spawned in %s seconds"
msgstr ""
-#: nova/virt/powervm/operator.py:304
+#: nova/virt/powervm/operator.py:321
#, python-format
msgid "During destroy, LPAR instance '%s' was not found on PowerVM system."
msgstr ""
-#: nova/virt/powervm/operator.py:320
+#: nova/virt/powervm/operator.py:337
#, python-format
msgid "Stopping instance %s for snapshot."
msgstr ""
-#: nova/virt/powervm/operator.py:343
+#: nova/virt/powervm/operator.py:360
#, python-format
msgid "Shutting down the instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:352
+#: nova/virt/powervm/operator.py:369
#, python-format
msgid "Deleting the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:355
+#: nova/virt/powervm/operator.py:372
msgid "PowerVM instance cleanup failed"
msgstr ""
@@ -8521,32 +8577,30 @@ msgstr ""
msgid "get_diagnostics not implemented for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1164
-#, python-format
-msgid "Reconfiguring VM instance to set the machine id with ip - %(ip_addr)s"
+#: nova/virt/vmwareapi/vmops.py:1170
+msgid "Reconfiguring VM instance to set the machine id"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1171
-#, python-format
-msgid "Reconfigured VM instance to set the machine id with ip - %(ip_addr)s"
+#: nova/virt/vmwareapi/vmops.py:1176
+msgid "Reconfigured VM instance to set the machine id"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1186
+#: nova/virt/vmwareapi/vmops.py:1190
#, python-format
msgid "Reconfiguring VM instance to enable vnc on port - %(port)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1193
+#: nova/virt/vmwareapi/vmops.py:1197
#, python-format
msgid "Reconfigured VM instance to enable vnc on port - %(port)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1286
+#: nova/virt/vmwareapi/vmops.py:1290
#, python-format
msgid "Creating directory with path %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1292
+#: nova/virt/vmwareapi/vmops.py:1296
#, python-format
msgid "Created directory with path %s"
msgstr ""
@@ -8938,374 +8992,374 @@ msgid ""
"Expected %(vlan_num)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:271
+#: nova/virt/xenapi/vm_utils.py:270
msgid "Created VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:283
+#: nova/virt/xenapi/vm_utils.py:282
msgid "VM destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:288 nova/virt/xenapi/vm_utils.py:303
+#: nova/virt/xenapi/vm_utils.py:287 nova/virt/xenapi/vm_utils.py:302
msgid "VM already halted, skipping shutdown..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:292
+#: nova/virt/xenapi/vm_utils.py:291
msgid "Shutting down VM (cleanly)"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:307
+#: nova/virt/xenapi/vm_utils.py:306
msgid "Shutting down VM (hard)"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:346
+#: nova/virt/xenapi/vm_utils.py:345
#, python-format
msgid "VBD not found in instance %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:363
+#: nova/virt/xenapi/vm_utils.py:362
#, python-format
msgid "VBD %s already detached"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:366
+#: nova/virt/xenapi/vm_utils.py:365
#, python-format
msgid "VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:371
+#: nova/virt/xenapi/vm_utils.py:370
#, python-format
msgid "Unable to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:376
+#: nova/virt/xenapi/vm_utils.py:375
#, python-format
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:387
+#: nova/virt/xenapi/vm_utils.py:386
#, python-format
msgid "Unable to destroy VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:406
+#: nova/virt/xenapi/vm_utils.py:405
#, python-format
msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:409
+#: nova/virt/xenapi/vm_utils.py:408
#, python-format
msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:425
+#: nova/virt/xenapi/vm_utils.py:424
#, python-format
msgid "Unable to destroy VDI %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:457
+#: nova/virt/xenapi/vm_utils.py:456
#, python-format
msgid ""
"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s)"
" on %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:471
+#: nova/virt/xenapi/vm_utils.py:470
msgid "SR not present and could not be introduced"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:496
+#: nova/virt/xenapi/vm_utils.py:495
#, python-format
msgid "block device info: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:582
+#: nova/virt/xenapi/vm_utils.py:581
#, python-format
msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:602
+#: nova/virt/xenapi/vm_utils.py:601
#, python-format
msgid "No primary VDI found for %(vm_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:615
+#: nova/virt/xenapi/vm_utils.py:614
msgid "Starting snapshot for VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:665
+#: nova/virt/xenapi/vm_utils.py:664
#, python-format
msgid "Destroying cached VDI '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:912
+#: nova/virt/xenapi/vm_utils.py:911
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
"this system was found to be of type %(sr_type)s. Ignoring the cow flag."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:968
+#: nova/virt/xenapi/vm_utils.py:967
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1002
+#: nova/virt/xenapi/vm_utils.py:1001
#, python-format
msgid "Fetched VDIs of type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1014
+#: nova/virt/xenapi/vm_utils.py:1013
#, python-format
msgid ""
"download_vhd %(image_id)s, attempt %(attempt_num)d/%(max_attempts)d, "
"params: %(params)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1027
+#: nova/virt/xenapi/vm_utils.py:1026
#, python-format
msgid "download_vhd failed: %r"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1061
+#: nova/virt/xenapi/vm_utils.py:1060
#, python-format
msgid "Invalid value '%s' for xenapi_torrent_images"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1072
+#: nova/virt/xenapi/vm_utils.py:1071
#, python-format
msgid "Asking xapi to fetch vhd image %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1136
+#: nova/virt/xenapi/vm_utils.py:1135
#, python-format
msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1151
+#: nova/virt/xenapi/vm_utils.py:1150
#, python-format
msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1155
+#: nova/virt/xenapi/vm_utils.py:1154
#, python-format
msgid ""
"Image size %(size_bytes)d exceeded instance_type allowed size "
"%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1177
+#: nova/virt/xenapi/vm_utils.py:1176
#, python-format
msgid "Fetching image %(image_id)s, type %(image_type_str)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1190
+#: nova/virt/xenapi/vm_utils.py:1189
#, python-format
msgid "Size for image %(image_id)s: %(virtual_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1199
+#: nova/virt/xenapi/vm_utils.py:1198
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
"bytes"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1218
+#: nova/virt/xenapi/vm_utils.py:1217
#, python-format
msgid "Copying VDI %s to /boot/guest on dom0"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1232
+#: nova/virt/xenapi/vm_utils.py:1231
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1241
+#: nova/virt/xenapi/vm_utils.py:1240
msgid "Failed to fetch glance image"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1282
+#: nova/virt/xenapi/vm_utils.py:1281
#, python-format
msgid "Detected %(image_type_str)s format for image %(image_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1313
+#: nova/virt/xenapi/vm_utils.py:1312
#, python-format
msgid "Looking up vdi %s for PV kernel"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1331
+#: nova/virt/xenapi/vm_utils.py:1330
#, python-format
msgid ""
"Image format is None: trying to determine PV status using pygrub; if "
"instance with vdi %s does not boot correctly, try with image metadata."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1337
+#: nova/virt/xenapi/vm_utils.py:1336
#, python-format
msgid "Unknown image format %(disk_image_type)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1368
+#: nova/virt/xenapi/vm_utils.py:1367
#, python-format
msgid "VDI %s is still available"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1464
+#: nova/virt/xenapi/vm_utils.py:1463
#, python-format
msgid "Unable to parse rrd of %(vm_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1491
+#: nova/virt/xenapi/vm_utils.py:1490
#, python-format
msgid "Re-scanning SR %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1519
+#: nova/virt/xenapi/vm_utils.py:1518
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1537
+#: nova/virt/xenapi/vm_utils.py:1536
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration and/or configure the flag "
"'sr_matching_filter'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1550
+#: nova/virt/xenapi/vm_utils.py:1549
msgid "Cannot find SR of content-type ISO"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1558
+#: nova/virt/xenapi/vm_utils.py:1557
#, python-format
msgid "ISO: looking at SR %(sr_rec)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1560
+#: nova/virt/xenapi/vm_utils.py:1559
msgid "ISO: not iso content"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1563
+#: nova/virt/xenapi/vm_utils.py:1562
msgid "ISO: iso content_type, no 'i18n-key' key"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1566
+#: nova/virt/xenapi/vm_utils.py:1565
msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1570
+#: nova/virt/xenapi/vm_utils.py:1569
msgid "ISO: SR MATCHing our criteria"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1572
+#: nova/virt/xenapi/vm_utils.py:1571
msgid "ISO: ISO, looking to see if it is host local"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1575
+#: nova/virt/xenapi/vm_utils.py:1574
#, python-format
msgid "ISO: PBD %(pbd_ref)s disappeared"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1578
+#: nova/virt/xenapi/vm_utils.py:1577
#, python-format
msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1581
+#: nova/virt/xenapi/vm_utils.py:1580
msgid "ISO: SR with local PBD"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1603
+#: nova/virt/xenapi/vm_utils.py:1602
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
"%(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1619
+#: nova/virt/xenapi/vm_utils.py:1618
#, python-format
msgid "Unable to obtain RRD XML updates with server details: %(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1673
+#: nova/virt/xenapi/vm_utils.py:1672
#, python-format
msgid "Invalid statistics data from Xenserver: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1733
+#: nova/virt/xenapi/vm_utils.py:1732
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1820
+#: nova/virt/xenapi/vm_utils.py:1819
#, python-format
msgid ""
"Parent %(parent_uuid)s doesn't match original parent "
"%(original_parent_uuid)s, waiting for coalesce..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1830
+#: nova/virt/xenapi/vm_utils.py:1829
#, python-format
msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1865
+#: nova/virt/xenapi/vm_utils.py:1864
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1885
+#: nova/virt/xenapi/vm_utils.py:1884
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1898
+#: nova/virt/xenapi/vm_utils.py:1897
#, python-format
msgid "Plugging VBD %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1901
+#: nova/virt/xenapi/vm_utils.py:1900
#, python-format
msgid "Plugging VBD %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1903
+#: nova/virt/xenapi/vm_utils.py:1902
#, python-format
msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1906
+#: nova/virt/xenapi/vm_utils.py:1905
#, python-format
msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1911
+#: nova/virt/xenapi/vm_utils.py:1910
#, python-format
msgid "Destroying VBD for VDI %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1919
+#: nova/virt/xenapi/vm_utils.py:1918
#, python-format
msgid "Destroying VBD for VDI %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1932
+#: nova/virt/xenapi/vm_utils.py:1931
#, python-format
msgid "Running pygrub against %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1940
+#: nova/virt/xenapi/vm_utils.py:1939
#, python-format
msgid "Found Xen kernel %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1942
+#: nova/virt/xenapi/vm_utils.py:1941
msgid "No Xen kernel found. Booting HVM."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1944
+#: nova/virt/xenapi/vm_utils.py:1943
msgid ""
"Error while executing pygrub! Please, ensure the binary is installed "
"correctly, and available in your PATH; on some Linux distros, pygrub may "
@@ -9313,28 +9367,28 @@ msgid ""
"mode."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1961
+#: nova/virt/xenapi/vm_utils.py:1960
msgid "Partitions:"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1967
+#: nova/virt/xenapi/vm_utils.py:1966
#, python-format
msgid " %(num)s: %(ptype)s %(size)d sectors"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1992
+#: nova/virt/xenapi/vm_utils.py:1991
#, python-format
msgid ""
"Writing partition table %(primary_first)d %(primary_last)d to "
"%(dev_path)s..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2005
+#: nova/virt/xenapi/vm_utils.py:2004
#, python-format
msgid "Writing partition table %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2059
+#: nova/virt/xenapi/vm_utils.py:2058
#, python-format
msgid ""
"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
@@ -9551,11 +9605,11 @@ msgstr ""
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1645
+#: nova/virt/xenapi/vmops.py:1646
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1681
+#: nova/virt/xenapi/vmops.py:1682
msgid "Migrate Send failed"
msgstr ""
@@ -9670,7 +9724,7 @@ msgstr ""
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr ""
-#: nova/volume/cinder.py:90
+#: nova/volume/cinder.py:94
#, python-format
msgid "Cinderclient connection created using URL: %s"
msgstr ""
diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova.po b/nova/locale/pt_BR/LC_MESSAGES/nova.po
index 2cb5d88d7..2f405e367 100644
--- a/nova/locale/pt_BR/LC_MESSAGES/nova.po
+++ b/nova/locale/pt_BR/LC_MESSAGES/nova.po
@@ -4146,7 +4146,7 @@ msgstr "Pid %d está ultrapassado, reiniciando radvd"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "Iniciando a VLAN %s"
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/ru/LC_MESSAGES/nova.po b/nova/locale/ru/LC_MESSAGES/nova.po
index 16598be25..861172310 100644
--- a/nova/locale/ru/LC_MESSAGES/nova.po
+++ b/nova/locale/ru/LC_MESSAGES/nova.po
@@ -4238,7 +4238,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "Запуск интерфейса VLAN %s"
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/tl/LC_MESSAGES/nova.po b/nova/locale/tl/LC_MESSAGES/nova.po
index ae13d3e6f..615df355f 100644
--- a/nova/locale/tl/LC_MESSAGES/nova.po
+++ b/nova/locale/tl/LC_MESSAGES/nova.po
@@ -4105,7 +4105,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/tr/LC_MESSAGES/nova.po b/nova/locale/tr/LC_MESSAGES/nova.po
index 5b018ae45..c8c40af80 100644
--- a/nova/locale/tr/LC_MESSAGES/nova.po
+++ b/nova/locale/tr/LC_MESSAGES/nova.po
@@ -4105,7 +4105,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova.po b/nova/locale/tr_TR/LC_MESSAGES/nova.po
index 519a37857..a4bf0b4fa 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova.po
@@ -4118,7 +4118,7 @@ msgstr "%d süreç numarası bozuldu, radvd tekrar başlatılıyor"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "%s VLAN arayüzü başlatılıyor "
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/uk/LC_MESSAGES/nova.po b/nova/locale/uk/LC_MESSAGES/nova.po
index 936695312..f988ec3eb 100644
--- a/nova/locale/uk/LC_MESSAGES/nova.po
+++ b/nova/locale/uk/LC_MESSAGES/nova.po
@@ -4107,7 +4107,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova.po b/nova/locale/zh_CN/LC_MESSAGES/nova.po
index ce736d4f8..a39f63b07 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova.po
@@ -4206,7 +4206,7 @@ msgstr "Pid %d 过期了，重新启动radvd"
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr "正在开启VLAN接口 %s"
#: nova/network/linux_net.py:1162
diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova.po b/nova/locale/zh_TW/LC_MESSAGES/nova.po
index 91e75790b..5bba1017d 100644
--- a/nova/locale/zh_TW/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_TW/LC_MESSAGES/nova.po
@@ -4113,7 +4113,7 @@ msgstr ""
#: nova/network/linux_net.py:1126
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid "Starting VLAN interface %s"
msgstr ""
#: nova/network/linux_net.py:1162
diff --git a/nova/manager.py b/nova/manager.py
index 7c7cbeb67..cc7d464ca 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -53,12 +53,13 @@ This module provides Manager, a base class for managers.
"""
-import eventlet
import time
+import eventlet
+from oslo.config import cfg
+
from nova.db import base
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
@@ -88,9 +89,15 @@ def periodic_task(*args, **kwargs):
1. Without arguments '@periodic_task', this will be run on every cycle
of the periodic scheduler.
- 2. With arguments, @periodic_task(periodic_spacing=N), this will be
- run on approximately every N seconds. If this number is negative the
- periodic task will be disabled.
+ 2. With arguments:
+ @periodic_task(spacing=N [, run_immediately=[True|False]])
+ this will be run on approximately every N seconds. If this number is
+ negative the periodic task will be disabled. If the run_immediately
+ argument is provided and has a value of 'True', the first run of the
+ task will be shortly after task scheduler starts. If
+ run_immediately is omitted or set to 'False', the first time the
+ task runs will be approximately N seconds after the task scheduler
+ starts.
"""
def decorator(f):
# Test for old style invocation
@@ -107,7 +114,10 @@ def periodic_task(*args, **kwargs):
# Control frequency
f._periodic_spacing = kwargs.pop('spacing', 0)
- f._periodic_last_run = time.time()
+ if kwargs.pop('run_immediately', False):
+ f._periodic_last_run = None
+ else:
+ f._periodic_last_run = time.time()
return f
# NOTE(sirp): The `if` is necessary to allow the decorator to be used with
@@ -213,6 +223,8 @@ class Manager(base.Base):
# If a periodic task is _nearly_ due, then we'll run it early
if self._periodic_spacing[task_name] is None:
wait = 0
+ elif self._periodic_last_run[task_name] is None:
+ wait = 0
else:
due = (self._periodic_last_run[task_name] +
self._periodic_spacing[task_name])
diff --git a/nova/netconf.py b/nova/netconf.py
index 531a9e200..f8116ed97 100644
--- a/nova/netconf.py
+++ b/nova/netconf.py
@@ -19,7 +19,7 @@
import socket
-from nova.openstack.common import cfg
+from oslo.config import cfg
CONF = cfg.CONF
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
index f0eeb4e84..89c63fcab 100644
--- a/nova/network/__init__.py
+++ b/nova/network/__init__.py
@@ -16,23 +16,24 @@
# License for the specific language governing permissions and limitations
# under the License.
+import oslo.config.cfg
+
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.network import <foo>' elsewhere.
-import nova.openstack.common.cfg
import nova.openstack.common.importutils
_network_opts = [
- nova.openstack.common.cfg.StrOpt('network_api_class',
- default='nova.network.api.API',
- help='The full class name of the '
- 'network API class to use'),
+ oslo.config.cfg.StrOpt('network_api_class',
+ default='nova.network.api.API',
+ help='The full class name of the '
+ 'network API class to use'),
]
-nova.openstack.common.cfg.CONF.register_opts(_network_opts)
+oslo.config.cfg.CONF.register_opts(_network_opts)
def API():
importutils = nova.openstack.common.importutils
- network_api_class = nova.openstack.common.cfg.CONF.network_api_class
+ network_api_class = oslo.config.cfg.CONF.network_api_class
cls = importutils.import_class(network_api_class)
return cls()
diff --git a/nova/network/api.py b/nova/network/api.py
index 09c4a8c79..4f0a2bffa 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -20,6 +20,7 @@
import functools
import inspect
+from nova.compute import instance_types
from nova.db import base
from nova import exception
from nova.network import floating_ips
@@ -243,7 +244,7 @@ class API(base.Base):
@refresh_cache
def allocate_for_instance(self, context, instance, vpn,
requested_networks, macs=None,
- conductor_api=None):
+ conductor_api=None, security_groups=None):
"""Allocates all network structures for an instance.
TODO(someone): document the rest of these parameters.
@@ -282,6 +283,25 @@ class API(base.Base):
args['host'] = instance['host']
self.network_rpcapi.deallocate_for_instance(context, **args)
+ # NOTE(danms): Here for quantum compatibility
+ def allocate_port_for_instance(self, context, instance, port_id,
+ network_id=None, requested_ip=None,
+ conductor_api=None):
+ raise NotImplementedError()
+
+ # NOTE(danms): Here for quantum compatibility
+ def deallocate_port_for_instance(self, context, instance, port_id,
+ conductor_api=None):
+ raise NotImplementedError()
+
+ # NOTE(danms): Here for quantum compatibility
+ def list_ports(self, *args, **kwargs):
+ raise NotImplementedError()
+
+ # NOTE(danms): Here for quantum compatibility
+ def show_port(self, *args, **kwargs):
+ raise NotImplementedError()
+
@wrap_check_policy
@refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id,
@@ -343,8 +363,9 @@ class API(base.Base):
def _get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
+ instance_type = instance_types.extract_instance_type(instance)
args = {'instance_id': instance['uuid'],
- 'rxtx_factor': instance['instance_type']['rxtx_factor'],
+ 'rxtx_factor': instance_type['rxtx_factor'],
'host': instance['host'],
'project_id': instance['project_id']}
nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)
diff --git a/nova/network/driver.py b/nova/network/driver.py
index 2a9218898..47d043e09 100644
--- a/nova/network/driver.py
+++ b/nova/network/driver.py
@@ -16,7 +16,8 @@
import sys
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
diff --git a/nova/network/floating_ips.py b/nova/network/floating_ips.py
index 2890573c1..c35dc85f9 100644
--- a/nova/network/floating_ips.py
+++ b/nova/network/floating_ips.py
@@ -17,11 +17,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
+
from nova import context
from nova.db import base
from nova import exception
from nova.network import rpcapi as network_rpcapi
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import lockutils
diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py
index 680b2f435..df24680b0 100644
--- a/nova/network/ldapdns.py
+++ b/nova/network/ldapdns.py
@@ -15,9 +15,10 @@
import ldap
import time
+from oslo.config import cfg
+
from nova import exception
from nova.network import dns_driver
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 43c0d1c53..29b882a02 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -25,9 +25,10 @@ import netaddr
import os
import re
+from oslo.config import cfg
+
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import lockutils
@@ -36,7 +37,6 @@ from nova.openstack.common import timeutils
from nova import paths
from nova import utils
-
LOG = logging.getLogger(__name__)
@@ -73,6 +73,11 @@ linux_net_opts = [
cfg.ListOpt('dmz_cidr',
default=[],
help='A list of dmz range that should be accepted'),
+ cfg.MultiStrOpt('force_snat_range',
+ default=[],
+ help='Traffic to this range will always be snatted to the '
+ 'fallback ip, even if it would normally be bridged out '
+ 'of the node. Can be specified multiple times.'),
cfg.StrOpt('dnsmasq_config_file',
default='',
help='Override the default dnsmasq settings with this file'),
@@ -627,6 +632,14 @@ def init_host(ip_range=None):
add_snat_rule(ip_range)
+ rules = []
+ for snat_range in CONF.force_snat_range:
+ rules.append('PREROUTING -p ipv4 --ip-src %s --ip-dst %s '
+ '-j redirect --redirect-target ACCEPT' %
+ (ip_range, snat_range))
+ if rules:
+ ensure_ebtables_rules(rules, 'nat')
+
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s/32 -j ACCEPT' %
(ip_range, CONF.metadata_host))
@@ -1346,7 +1359,7 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not device_exists(interface):
- LOG.debug(_('Starting VLAN inteface %s'), interface)
+ LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('ip', 'link', 'add', 'link', bridge_interface,
'name', interface, 'type', 'vlan',
'id', vlan_num, run_as_root=True,
diff --git a/nova/network/manager.py b/nova/network/manager.py
index ef967383c..a4669def0 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -50,8 +50,8 @@ import uuid
from eventlet import greenpool
import netaddr
+from oslo.config import cfg
-from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova import ipv6
@@ -61,7 +61,7 @@ from nova.network import driver
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
-from nova.openstack.common import cfg
+from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -72,7 +72,6 @@ from nova.openstack.common import uuidutils
from nova import servicegroup
from nova import utils
-
LOG = logging.getLogger(__name__)
@@ -287,7 +286,9 @@ class NetworkManager(manager.SchedulerDependentManager):
CONF.floating_ip_dns_manager)
self.network_api = network_api.API()
self.network_rpcapi = network_rpcapi.NetworkAPI()
- self.security_group_api = compute_api.SecurityGroupAPI()
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
+
self.servicegroup_api = servicegroup.API()
# NOTE(tr3buchet: unless manager subclassing NetworkManager has
@@ -1398,9 +1399,9 @@ class FlatManager(NetworkManager):
The idea is to create a single network for the host with a command like:
nova-manage network create 192.168.0.0/24 1 256. Creating multiple
networks for for one manager is currently not supported, but could be
- added by modifying allocate_fixed_ip and get_network to get the a network
- with new logic instead of network_get_by_bridge. Arbitrary lists of
- addresses in a single network can be accomplished with manual db editing.
+ added by modifying allocate_fixed_ip and get_network to get the network
+ with new logic. Arbitrary lists of addresses in a single network can
+ be accomplished with manual db editing.
If flat_injected is True, the compute host will attempt to inject network
config into the guest. It attempts to modify /etc/network/interfaces and
@@ -1492,6 +1493,37 @@ class FlatManager(NetworkManager):
# we major version the network_rpcapi to 2.0.
return []
+ @network_api.wrap_check_policy
+ def allocate_floating_ip(self, context, project_id, pool):
+ """Gets a floating ip from the pool."""
+ return None
+
+ @network_api.wrap_check_policy
+ def deallocate_floating_ip(self, context, address,
+ affect_auto_assigned):
+ """Returns a floating ip to the pool."""
+ return None
+
+ @network_api.wrap_check_policy
+ def associate_floating_ip(self, context, floating_address, fixed_address,
+ affect_auto_assigned=False):
+ """Associates a floating ip with a fixed ip.
+
+ Makes sure everything makes sense then calls _associate_floating_ip,
+ rpc'ing to correct host if i'm not it.
+ """
+ return None
+
+ @network_api.wrap_check_policy
+ def disassociate_floating_ip(self, context, address,
+ affect_auto_assigned=False):
+ """Disassociates a floating ip from its fixed ip.
+
+ Makes sure everything makes sense then calls _disassociate_floating_ip,
+ rpc'ing to correct host if i'm not it.
+ """
+ return None
+
def migrate_instance_start(self, context, instance_uuid,
floating_addresses,
rxtx_factor=None, project_id=None,
@@ -1680,13 +1712,13 @@ class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager):
def _get_network_by_id(self, context, network_id):
# NOTE(vish): Don't allow access to networks with project_id=None as
- # these are networksa that haven't been allocated to a
+ # these are networks that haven't been allocated to a
# project yet.
return self.db.network_get(context, network_id, project_only=True)
def _get_networks_by_uuids(self, context, network_uuids):
# NOTE(vish): Don't allow access to networks with project_id=None as
- # these are networksa that haven't been allocated to a
+ # these are networks that haven't been allocated to a
# project yet.
return self.db.network_get_all_by_uuids(context, network_uuids,
project_only=True)
diff --git a/nova/network/minidns.py b/nova/network/minidns.py
index dc9dc6f17..b4b5257ea 100644
--- a/nova/network/minidns.py
+++ b/nova/network/minidns.py
@@ -16,12 +16,12 @@ import os
import shutil
import tempfile
+from oslo.config import cfg
+
from nova import exception
from nova.network import dns_driver
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/network/quantumv2/__init__.py b/nova/network/quantumv2/__init__.py
index 1b7381e8e..5d3cf5770 100644
--- a/nova/network/quantumv2/__init__.py
+++ b/nova/network/quantumv2/__init__.py
@@ -15,12 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
-from nova.openstack.common import excutils
-from nova.openstack.common import log as logging
+from oslo.config import cfg
from quantumclient import client
from quantumclient.v2_0 import client as clientv20
+from nova.openstack.common import excutils
+from nova.openstack.common import log as logging
+
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index ee4ceb9cd..a177c1ac0 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -16,18 +16,22 @@
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+import time
+
+from oslo.config import cfg
+
from nova import conductor
+from nova import context
from nova.db import base
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network import quantumv2
-from nova.openstack.common import cfg
+from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
-
quantum_opts = [
cfg.StrOpt('quantum_url',
default='http://127.0.0.1:9696',
@@ -59,6 +63,10 @@ quantum_opts = [
cfg.StrOpt('quantum_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
+ cfg.IntOpt('quantum_extension_sync_interval',
+ default=600,
+ help='Number of seconds before querying quantum for'
+ ' extensions'),
]
CONF = cfg.CONF
@@ -76,6 +84,12 @@ class API(base.Base):
"""API for interacting with the quantum 2.x API."""
conductor_api = conductor.API()
+ security_group_api = openstack_driver.get_openstack_security_group_driver()
+
+ def __init__(self):
+ super(API, self).__init__()
+ self.last_quantum_extension_sync = None
+ self.extensions = {}
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
@@ -110,7 +124,7 @@ class API(base.Base):
return nets
def allocate_for_instance(self, context, instance, **kwargs):
- """Allocate all network resources for the instance.
+ """Allocate network resources for the instance.
TODO(someone): document the rest of these parameters.
@@ -163,9 +177,55 @@ class API(base.Base):
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
+ security_groups = kwargs.get('security_groups', [])
+ security_group_ids = []
+
+ # TODO(arosen) Should optimize more to do direct query for security
+ # group if len(security_groups) == 1
+ if len(security_groups):
+ search_opts = {'tenant_id': instance['project_id']}
+ user_security_groups = quantum.list_security_groups(
+ **search_opts).get('security_groups')
+
+ for security_group in security_groups:
+ name_match = None
+ uuid_match = None
+ for user_security_group in user_security_groups:
+ if user_security_group['name'] == security_group:
+ if name_match:
+ msg = (_("Multiple security groups found matching"
+ " '%s'. Use an ID to be more specific."),
+ security_group)
+ raise exception.NoUniqueMatch(msg)
+ name_match = user_security_group['id']
+ if user_security_group['id'] == security_group:
+ uuid_match = user_security_group['id']
+
+ # If a user names the security group the same as
+ # another's security groups uuid, the name takes priority.
+ if not name_match and not uuid_match:
+ raise exception.SecurityGroupNotFound(
+ security_group_id=security_group)
+ security_group_ids.append(name_match)
+ elif name_match:
+ security_group_ids.append(name_match)
+ elif uuid_match:
+ security_group_ids.append(uuid_match)
+
touched_port_ids = []
created_port_ids = []
for network in nets:
+ # If security groups are requested on an instance then the
+            # network must have a subnet associated with it. Some plugins
+ # implement the port-security extension which requires
+ # 'port_security_enabled' to be True for security groups.
+            # That is why True is assumed if 'port_security_enabled'
+            # is not present in the network dict.
+ if (security_groups and not (
+ network['subnets']
+ and network.get('port_security_enabled', True))):
+
+ raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
@@ -182,12 +242,18 @@ class API(base.Base):
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
+ if security_group_ids:
+ port_req_body['port']['security_groups'] = (
+ security_group_ids)
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance['display_name'])
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
+
+ self._populate_quantum_extension_values(instance,
+ port_req_body)
created_port_ids.append(
quantum.create_port(port_req_body)['port']['id'])
except Exception:
@@ -214,6 +280,23 @@ class API(base.Base):
return self.get_instance_nw_info(context, instance, networks=nets,
conductor_api=kwargs.get('conductor_api'))
+ def _refresh_quantum_extensions_cache(self):
+ if (not self.last_quantum_extension_sync or
+ ((time.time() - self.last_quantum_extension_sync)
+ >= CONF.quantum_extension_sync_interval)):
+ quantum = quantumv2.get_client(context.get_admin_context())
+ extensions_list = quantum.list_extensions()['extensions']
+ self.last_quantum_extension_sync = time.time()
+ self.extensions.clear()
+ self.extensions = dict((ext['name'], ext)
+ for ext in extensions_list)
+
+ def _populate_quantum_extension_values(self, instance, port_req_body):
+ self._refresh_quantum_extensions_cache()
+ if 'nvp-qos' in self.extensions:
+ rxtx_factor = instance['instance_type'].get('rxtx_factor')
+ port_req_body['port']['rxtx_factor'] = rxtx_factor
+
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug(_('deallocate_for_instance() for %s'),
@@ -230,6 +313,33 @@ class API(base.Base):
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
+ def allocate_port_for_instance(self, context, instance, port_id,
+ network_id=None, requested_ip=None,
+ conductor_api=None):
+ return self.allocate_for_instance(context, instance,
+ requested_networks=[(network_id, requested_ip, port_id)],
+ conductor_api=conductor_api)
+
+ def deallocate_port_for_instance(self, context, instance, port_id,
+ conductor_api=None):
+ try:
+ quantumv2.get_client(context).delete_port(port_id)
+ except Exception as ex:
+ LOG.exception(_("Failed to delete quantum port %(port_id)s ") %
+ locals())
+
+ self.trigger_security_group_members_refresh(context, instance)
+ self.trigger_instance_remove_security_group_refresh(context, instance)
+
+ return self.get_instance_nw_info(context, instance,
+ conductor_api=conductor_api)
+
+ def list_ports(self, context, **search_opts):
+ return quantumv2.get_client(context).list_ports(**search_opts)
+
+ def show_port(self, context, port_id):
+ return quantumv2.get_client(context).show_port(port_id)
+
def get_instance_nw_info(self, context, instance, networks=None,
conductor_api=None):
result = self._get_instance_nw_info(context, instance, networks)
@@ -425,11 +535,16 @@ class API(base.Base):
def get_all(self, context):
client = quantumv2.get_client(context)
- return client.list_networks()
+ networks = client.list_networks().get('networks') or {}
+ for network in networks:
+ network['label'] = network['name']
+ return networks
def get(self, context, network_uuid):
client = quantumv2.get_client(context)
- return client.show_network(network_uuid)
+ network = client.show_network(network_uuid).get('network') or {}
+ network['label'] = network['name']
+ return network
def delete(self, context, network_uuid):
raise NotImplementedError()
@@ -640,7 +755,7 @@ class API(base.Base):
data = quantumv2.get_client(context,
admin=True).list_ports(**search_opts)
ports = data.get('ports', [])
- if not networks:
+ if networks is None:
networks = self._get_available_networks(context,
instance['project_id'])
else:
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index ed8b775fd..d0d6b5f99 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -18,7 +18,8 @@
Client side of the network RPC API.
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova.openstack.common.rpc import proxy as rpc_proxy
diff --git a/nova/network/security_group/__init__.py b/nova/network/security_group/__init__.py
new file mode 100644
index 000000000..5f67d7881
--- /dev/null
+++ b/nova/network/security_group/__init__.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Nicira, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Aaron Rosen, Nicira Networks, Inc.
diff --git a/nova/network/security_group/openstack_driver.py b/nova/network/security_group/openstack_driver.py
new file mode 100644
index 000000000..46f3f3491
--- /dev/null
+++ b/nova/network/security_group/openstack_driver.py
@@ -0,0 +1,59 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Nicira, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Aaron Rosen, Nicira Networks, Inc.
+
+from oslo.config import cfg
+
+from nova.openstack.common import importutils
+
+security_group_opts = [
+ cfg.StrOpt('security_group_api',
+ default='nova',
+ help='The full class name of the security API class'),
+ cfg.StrOpt('security_group_handler',
+ default='nova.network.sg.NullSecurityGroupHandler',
+ help='The full class name of the security group handler class'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(security_group_opts)
+
+NOVA_DRIVER = ('nova.api.openstack.compute.contrib.security_groups.'
+ 'NativeNovaSecurityGroupAPI')
+QUANTUM_DRIVER = ('nova.api.openstack.compute.contrib.security_groups.'
+ 'NativeQuantumSecurityGroupAPI')
+
+
+def get_openstack_security_group_driver():
+ if CONF.security_group_api.lower() == 'nova':
+ return importutils.import_object(NOVA_DRIVER)
+ elif CONF.security_group_api.lower() == 'quantum':
+ return importutils.import_object(QUANTUM_DRIVER)
+ else:
+ return importutils.import_object(CONF.security_group_api)
+
+
+def get_security_group_handler():
+ return importutils.import_object(CONF.security_group_handler)
+
+
+def is_quantum_security_groups():
+ if CONF.security_group_api.lower() == "quantum":
+ return True
+ else:
+ return False
diff --git a/nova/network/security_group/quantum_driver.py b/nova/network/security_group/quantum_driver.py
new file mode 100644
index 000000000..918c839e9
--- /dev/null
+++ b/nova/network/security_group/quantum_driver.py
@@ -0,0 +1,398 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Nicira, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Aaron Rosen, Nicira Networks, Inc.
+
+from oslo.config import cfg
+from quantumclient.common import exceptions as q_exc
+from quantumclient.quantum import v2_0 as quantumv20
+from webob import exc
+
+from nova.compute import api as compute_api
+from nova import context
+from nova import exception
+from nova.network import quantumv2
+from nova.network.security_group import security_group_base
+from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
+
+
+from nova import utils
+
+
+wrap_check_security_groups_policy = compute_api.policy_decorator(
+ scope='compute:security_groups')
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class SecurityGroupAPI(security_group_base.SecurityGroupBase):
+
+ id_is_uuid = True
+
+ def create_security_group(self, context, name, description):
+ quantum = quantumv2.get_client(context)
+ body = self._make_quantum_security_group_dict(name, description)
+ try:
+ security_group = quantum.create_security_group(
+ body).get('security_group')
+ except q_exc.QuantumClientException as e:
+ LOG.exception(_("Quantum Error creating security group %s"),
+ name)
+ if e.status_code == 401:
+ # TODO(arosen) Cannot raise generic response from quantum here
+ # as this error code could be related to bad input or over
+ # quota
+ raise exc.HTTPBadRequest()
+ raise e
+ return self._convert_to_nova_security_group_format(security_group)
+
+ def _convert_to_nova_security_group_format(self, security_group):
+ nova_group = {}
+ nova_group['id'] = security_group['id']
+ nova_group['description'] = security_group['description']
+ nova_group['name'] = security_group['name']
+ nova_group['project_id'] = security_group['tenant_id']
+ nova_group['rules'] = []
+ for rule in security_group.get('security_group_rules', []):
+ if rule['direction'] == 'ingress':
+ nova_group['rules'].append(
+ self._convert_to_nova_security_group_rule_format(rule))
+
+ return nova_group
+
+ def _convert_to_nova_security_group_rule_format(self, rule):
+ nova_rule = {}
+ nova_rule['id'] = rule['id']
+ nova_rule['parent_group_id'] = rule['security_group_id']
+ nova_rule['protocol'] = rule['protocol']
+ if rule['port_range_min'] is None:
+ nova_rule['from_port'] = -1
+ else:
+ nova_rule['from_port'] = rule['port_range_min']
+
+ if rule['port_range_max'] is None:
+ nova_rule['to_port'] = -1
+ else:
+ nova_rule['to_port'] = rule['port_range_max']
+ nova_rule['group_id'] = rule['source_group_id']
+ nova_rule['cidr'] = rule['source_ip_prefix']
+ return nova_rule
+
+ def get(self, context, name=None, id=None, map_exception=False):
+ quantum = quantumv2.get_client(context)
+ try:
+ group = quantum.show_security_group(id).get('security_group')
+ except q_exc.QuantumClientException as e:
+ if e.status_code == 404:
+ LOG.exception(_("Quantum Error getting security group %s"),
+ name)
+ self.raise_not_found(e.message)
+ else:
+ LOG.error(_("Quantum Error: %s"), e)
+ raise e
+
+ return self._convert_to_nova_security_group_format(group)
+
+ def list(self, context, names=None, ids=None, project=None,
+ search_opts=None):
+ """Returns list of security group rules owned by tenant."""
+ quantum = quantumv2.get_client(context)
+ try:
+ security_groups = quantum.list_security_groups().get(
+ 'security_groups')
+ except q_exc.QuantumClientException as e:
+ LOG.exception(_("Quantum Error getting security groups"))
+ raise e
+ converted_rules = []
+ for security_group in security_groups:
+ converted_rules.append(
+ self._convert_to_nova_security_group_format(security_group))
+ return converted_rules
+
+ def validate_id(self, id):
+ if not uuidutils.is_uuid_like(id):
+ msg = _("Security group id should be uuid")
+ self.raise_invalid_property(msg)
+ return id
+
+ def destroy(self, context, security_group):
+ """This function deletes a security group."""
+
+ quantum = quantumv2.get_client(context)
+ try:
+ quantum.delete_security_group(security_group['id'])
+ except q_exc.QuantumClientException as e:
+ if e.status_code == 404:
+ self.raise_not_found(e.message)
+ elif e.status_code == 409:
+ self.raise_invalid_property(e.message)
+ else:
+ LOG.error(_("Quantum Error: %s"), e)
+ raise e
+
+ def add_rules(self, context, id, name, vals):
+ """Add security group rule(s) to security group.
+
+        Note: the Nova security group API doesn't support adding multiple
+        security group rules at once but the EC2 one does. Therefore,
+        this function is written to support both. Multiple rules are
+ installed to a security group in quantum using bulk support."""
+
+ quantum = quantumv2.get_client(context)
+ body = self._make_quantum_security_group_rules_list(vals)
+ try:
+ rules = quantum.create_security_group_rule(
+ body).get('security_group_rules')
+ except q_exc.QuantumClientException as e:
+ if e.status_code == 409:
+ LOG.exception(_("Quantum Error getting security group %s"),
+ name)
+ self.raise_not_found(e.message)
+ else:
+ LOG.exception(_("Quantum Error:"))
+ raise e
+ converted_rules = []
+ for rule in rules:
+ converted_rules.append(
+ self._convert_to_nova_security_group_rule_format(rule))
+ return converted_rules
+
+ def _make_quantum_security_group_dict(self, name, description):
+ return {'security_group': {'name': name,
+ 'description': description}}
+
+ def _make_quantum_security_group_rules_list(self, rules):
+ new_rules = []
+ for rule in rules:
+ new_rule = {}
+ # nova only supports ingress rules so all rules are ingress.
+ new_rule['direction'] = "ingress"
+ new_rule['protocol'] = rule.get('protocol')
+
+ # FIXME(arosen) Nova does not expose ethertype on security group
+ # rules. Therefore, in the case of self referential rules we
+ # should probably assume they want to allow both IPv4 and IPv6.
+ # Unfortunately, this would require adding two rules in quantum.
+ # The reason we do not do this is because when the user using the
+ # nova api wants to remove the rule we'd have to have some way to
+ # know that we should delete both of these rules in quantum.
+ # For now, self referential rules only support IPv4.
+ if not rule.get('cidr'):
+ new_rule['ethertype'] = 'IPv4'
+ else:
+ new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr'))
+ new_rule['source_ip_prefix'] = rule.get('cidr')
+ new_rule['security_group_id'] = rule.get('parent_group_id')
+ new_rule['source_group_id'] = rule.get('group_id')
+ if rule['from_port'] != -1:
+ new_rule['port_range_min'] = rule['from_port']
+ if rule['to_port'] != -1:
+ new_rule['port_range_max'] = rule['to_port']
+ new_rules.append(new_rule)
+ return {'security_group_rules': new_rules}
+
+ def create_security_group_rule(self, context, security_group, new_rule):
+ return self.add_rules(context, new_rule['parent_group_id'],
+ security_group['name'], [new_rule])[0]
+
+ def remove_rules(self, context, security_group, rule_ids):
+ quantum = quantumv2.get_client(context)
+ rule_ids = set(rule_ids)
+ try:
+ # The ec2 api allows one to delete multiple security group rules
+ # at once. Since there is no bulk delete for quantum the best
+ # thing we can do is delete the rules one by one and hope this
+ # works.... :/
+ for rule_id in range(0, len(rule_ids)):
+ quantum.delete_security_group_rule(rule_ids.pop())
+ except q_exc.QuantumClientException as e:
+ LOG.exception(_("Quantum Error unable to delete %s"),
+ rule_ids)
+ raise e
+
+ def get_rule(self, context, id):
+ quantum = quantumv2.get_client(context)
+ try:
+ rule = quantum.show_security_group_rule(
+ id).get('security_group_rule')
+ except q_exc.QuantumClientException as e:
+ if e.status_code == 404:
+ LOG.exception(_("Quantum Error getting security group rule "
+ "%s.") % id)
+ self.raise_not_found(e.message)
+ else:
+ LOG.error(_("Quantum Error: %s"), e)
+ raise e
+ return self._convert_to_nova_security_group_rule_format(rule)
+
+ def get_instance_security_groups(self, req, instance_id):
+ dict_security_groups = {}
+ security_group_name_map = {}
+ admin_context = context.get_admin_context()
+
+ quantum = quantumv2.get_client(admin_context)
+ params = {'device_id': instance_id}
+ ports = quantum.list_ports(**params)
+ security_groups = quantum.list_security_groups().get('security_groups')
+
+ for security_group in security_groups:
+ name = security_group.get('name')
+ # The name is optional for quantum security groups, so fall back to id
+ if not name:
+ name = security_group['id']
+ security_group_name_map[security_group['id']] = name
+
+ for port in ports['ports']:
+ for security_group in port.get('security_groups', []):
+ try:
+ dict_security_groups[security_group] = (
+ security_group_name_map[security_group])
+ except KeyError:
+ # This should only happen due to a race condition, i.e.
+ # if the security group on a port was deleted after the
+ # ports were returned. We pass since this security group
+ # is no longer on the port.
+ pass
+ ret = []
+ for security_group in dict_security_groups.values():
+ ret.append({'name': security_group})
+ return ret
+
+ def _has_security_group_requirements(self, port):
+ port_security_enabled = port.get('port_security_enabled')
+ has_ip = port.get('fixed_ips')
+ if port_security_enabled and has_ip:
+ return True
+ else:
+ return False
+
+ @wrap_check_security_groups_policy
+ def add_to_instance(self, context, instance, security_group_name):
+ """Add security group to the instance."""
+
+ quantum = quantumv2.get_client(context)
+ try:
+ security_group_id = quantumv20.find_resourceid_by_name_or_id(
+ quantum, 'security_group', security_group_name)
+ except q_exc.QuantumClientException as e:
+ if e.status_code == 404:
+ msg = ("Security group %s is not found for project %s" %
+ (security_group_name, context.project_id))
+ self.raise_not_found(msg)
+ else:
+ LOG.exception(_("Quantum Error:"))
+ raise e
+ params = {'device_id': instance['uuid']}
+ try:
+ ports = quantum.list_ports(**params).get('ports')
+ except q_exc.QuantumClientException as e:
+ LOG.exception(_("Quantum Error:"))
+ raise e
+
+ if not ports:
+ msg = ("instance_id %s could not be found as device id on"
+ " any ports" % instance['uuid'])
+ self.raise_not_found(msg)
+
+ for port in ports:
+ if not self._has_security_group_requirements(port):
+ LOG.warn(_("Cannot add security group %(name)s to %(instance)s"
+ " since the port %(port_id)s does not meet security"
+ " requirements"), {'name': security_group_name,
+ 'instance': instance['uuid'], 'port_id': port['id']})
+ raise exception.SecurityGroupCannotBeApplied()
+ if 'security_groups' not in port:
+ port['security_groups'] = []
+ port['security_groups'].append(security_group_id)
+ updated_port = {'security_groups': port['security_groups']}
+ try:
+ LOG.info(_("Adding security group %(security_group_id)s to "
+ "port %(port_id)s"),
+ {'security_group_id': security_group_id,
+ 'port_id': port['id']})
+ quantum.update_port(port['id'], {'port': updated_port})
+ except Exception:
+ LOG.exception(_("Quantum Error:"))
+ raise
+
+ @wrap_check_security_groups_policy
+ def remove_from_instance(self, context, instance, security_group_name):
+ """Remove the security group associated with the instance."""
+ quantum = quantumv2.get_client(context)
+ try:
+ security_group_id = quantumv20.find_resourceid_by_name_or_id(
+ quantum, 'security_group', security_group_name)
+ except q_exc.QuantumClientException as e:
+ if e.status_code == 404:
+ msg = ("Security group %s is not found for project %s" %
+ (security_group_name, context.project_id))
+ self.raise_not_found(msg)
+ else:
+ LOG.exception(_("Quantum Error:"))
+ raise e
+ params = {'device_id': instance['uuid']}
+ try:
+ ports = quantum.list_ports(**params).get('ports')
+ except q_exc.QuantumClientException as e:
+ LOG.exception(_("Quantum Error:"))
+ raise e
+
+ if not ports:
+ msg = ("instance_id %s could not be found as device id on"
+ " any ports" % instance['uuid'])
+ self.raise_not_found(msg)
+
+ found_security_group = False
+ for port in ports:
+ try:
+ port.get('security_groups', []).remove(security_group_id)
+ except ValueError:
+ # When removing a security group from an instance the security
+ # group should be on both ports since it was added this way if
+ # done through the nova api. If it is not, a 404 is only
+ # raised when the security group is not found on any of the
+ # ports on the instance.
+ continue
+
+ updated_port = {'security_groups': port['security_groups']}
+ try:
+ LOG.info(_("Adding security group %(security_group_id)s to "
+ "port %(port_id)s"),
+ {'security_group_id': security_group_id,
+ 'port_id': port['id']})
+ quantum.update_port(port['id'], {'port': updated_port})
+ found_security_group = True
+ except Exception:
+ LOG.exception(_("Quantum Error:"))
+ raise e
+ if not found_security_group:
+ msg = (_("Security group %(security_group_name)s not assocaited "
+ "with the instance %(instance)s"),
+ {'security_group_name': security_group_name,
+ 'instance': instance['uuid']})
+ self.raise_not_found(msg)
+
+ def rule_exists(self, security_group, new_rule):
+ # Handled by quantum
+ pass
+
+ def populate_security_groups(self, instance, security_groups):
+ # Setting to empty list since we do not want to populate this field
+ # in the nova database if using the quantum driver
+ instance['security_groups'] = []
diff --git a/nova/network/security_group/security_group_base.py b/nova/network/security_group/security_group_base.py
new file mode 100644
index 000000000..499f808b1
--- /dev/null
+++ b/nova/network/security_group/security_group_base.py
@@ -0,0 +1,196 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
+# Copyright 2012 Red Hat, Inc.
+# Copyright 2013 Nicira, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Aaron Rosen, Nicira Networks, Inc.
+
+import urllib
+
+from oslo.config import cfg
+
+from nova import exception
+from nova import utils
+
+CONF = cfg.CONF
+
+
+class SecurityGroupBase(object):
+
+ def parse_cidr(self, cidr):
+ if cidr:
+ try:
+ cidr = urllib.unquote(cidr).decode()
+ except Exception as e:
+ self.raise_invalid_cidr(cidr, e)
+
+ if not utils.is_valid_cidr(cidr):
+ self.raise_invalid_cidr(cidr)
+
+ return cidr
+ else:
+ return '0.0.0.0/0'
+
+ @staticmethod
+ def new_group_ingress_rule(grantee_group_id, protocol, from_port,
+ to_port):
+ return SecurityGroupBase._new_ingress_rule(
+ protocol, from_port, to_port, group_id=grantee_group_id)
+
+ @staticmethod
+ def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port):
+ return SecurityGroupBase._new_ingress_rule(
+ protocol, from_port, to_port, cidr=grantee_cidr)
+
+ @staticmethod
+ def _new_ingress_rule(ip_protocol, from_port, to_port,
+ group_id=None, cidr=None):
+ values = {}
+
+ if group_id:
+ values['group_id'] = group_id
+ # Open everything if an explicit port range or type/code are not
+ # specified, but only if a source group was specified.
+ ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
+ if (ip_proto_upper == 'ICMP' and
+ from_port is None and to_port is None):
+ from_port = -1
+ to_port = -1
+ elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
+ and to_port is None):
+ from_port = 1
+ to_port = 65535
+
+ elif cidr:
+ values['cidr'] = cidr
+
+ if ip_protocol and from_port is not None and to_port is not None:
+
+ ip_protocol = str(ip_protocol)
+ try:
+ # Verify integer conversions
+ from_port = int(from_port)
+ to_port = int(to_port)
+ except ValueError:
+ if ip_protocol.upper() == 'ICMP':
+ raise exception.InvalidInput(reason="Type and"
+ " Code must be integers for ICMP protocol type")
+ else:
+ raise exception.InvalidInput(reason="To and From ports "
+ "must be integers")
+
+ if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
+ raise exception.InvalidIpProtocol(protocol=ip_protocol)
+
+ # Verify that from_port must always be less than
+ # or equal to to_port
+ if (ip_protocol.upper() in ['TCP', 'UDP'] and
+ (from_port > to_port)):
+ raise exception.InvalidPortRange(from_port=from_port,
+ to_port=to_port, msg="Former value cannot"
+ " be greater than the later")
+
+ # Verify valid TCP, UDP port ranges
+ if (ip_protocol.upper() in ['TCP', 'UDP'] and
+ (from_port < 1 or to_port > 65535)):
+ raise exception.InvalidPortRange(from_port=from_port,
+ to_port=to_port, msg="Valid TCP ports should"
+ " be between 1-65535")
+
+ # Verify ICMP type and code
+ if (ip_protocol.upper() == "ICMP" and
+ (from_port < -1 or from_port > 255 or
+ to_port < -1 or to_port > 255)):
+ raise exception.InvalidPortRange(from_port=from_port,
+ to_port=to_port, msg="For ICMP, the"
+ " type:code must be valid")
+
+ values['protocol'] = ip_protocol
+ values['from_port'] = from_port
+ values['to_port'] = to_port
+
+ else:
+ # If cidr based filtering, protocol and ports are mandatory
+ if cidr:
+ return None
+
+ return values
+
+ def validate_property(self, value, property, allowed):
+ pass
+
+ def ensure_default(self, context):
+ pass
+
+ def trigger_handler(self, event, *args):
+ pass
+
+ def trigger_rules_refresh(self, context, id):
+ """Called when a rule is added to or removed from a security_group."""
+ pass
+
+ def trigger_members_refresh(self, context, group_ids):
+ """Called when a security group gains a new or loses a member.
+
+ Sends an update request to each compute node for each instance for
+ which this is relevant.
+ """
+ pass
+
+ def populate_security_groups(self, instance, security_groups):
+ """Called when populating the database for an instances
+ security groups."""
+ raise NotImplementedError()
+
+ def create_security_group(self, context, name, description):
+ raise NotImplementedError()
+
+ def get(self, context, name=None, id=None, map_exception=False):
+ raise NotImplementedError()
+
+ def list(self, context, names=None, ids=None, project=None,
+ search_opts=None):
+ raise NotImplementedError()
+
+ def destroy(self, context, security_group):
+ raise NotImplementedError()
+
+ def add_rules(self, context, id, name, vals):
+ raise NotImplementedError()
+
+ def create_security_group_rule(self, context, security_group, new_rule):
+ raise NotImplementedError()
+
+ def remove_rules(self, context, security_group, rule_ids):
+ raise NotImplementedError()
+
+ def get_rule(self, context, id):
+ raise NotImplementedError()
+
+ def get_instance_security_groups(self, req, instance_id):
+ raise NotImplementedError()
+
+ def add_to_instance(self, context, instance, security_group_name):
+ raise NotImplementedError()
+
+ def remove_from_instance(self, context, instance, security_group_name):
+ raise NotImplementedError()
+
+ def rule_exists(self, security_group, new_rule):
+ raise NotImplementedError()
diff --git a/nova/notifications.py b/nova/notifications.py
index f40fff7f2..79d43126a 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -19,12 +19,13 @@
the system.
"""
+from oslo.config import cfg
+
import nova.context
from nova import db
from nova.image import glance
from nova import network
from nova.network import model as network_model
-from nova.openstack.common import cfg
from nova.openstack.common import log
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common import timeutils
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
index 6a6d6bf32..3ff5fa003 100644
--- a/nova/objectstore/s3server.py
+++ b/nova/objectstore/s3server.py
@@ -41,10 +41,10 @@ import os
import os.path
import urllib
+from oslo.config import cfg
import routes
import webob
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova import paths
from nova import utils
diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py
index baecc0c3b..c35dcb845 100644
--- a/nova/openstack/common/cfg.py
+++ b/nova/openstack/common/cfg.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 Red Hat, Inc.
+# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -14,1736 +14,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-r"""
-Configuration options which may be set on the command line or in config files.
-
-The schema for each option is defined using the Opt sub-classes, e.g.:
-
-::
-
- common_opts = [
- cfg.StrOpt('bind_host',
- default='0.0.0.0',
- help='IP address to listen on'),
- cfg.IntOpt('bind_port',
- default=9292,
- help='Port number to listen on')
- ]
-
-Options can be strings, integers, floats, booleans, lists or 'multi strings'::
-
- enabled_apis_opt = cfg.ListOpt('enabled_apis',
- default=['ec2', 'osapi_compute'],
- help='List of APIs to enable by default')
-
- DEFAULT_EXTENSIONS = [
- 'nova.api.openstack.compute.contrib.standard_extensions'
- ]
- osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension',
- default=DEFAULT_EXTENSIONS)
-
-Option schemas are registered with the config manager at runtime, but before
-the option is referenced::
-
- class ExtensionManager(object):
-
- enabled_apis_opt = cfg.ListOpt(...)
-
- def __init__(self, conf):
- self.conf = conf
- self.conf.register_opt(enabled_apis_opt)
- ...
-
- def _load_extensions(self):
- for ext_factory in self.conf.osapi_compute_extension:
- ....
-
-A common usage pattern is for each option schema to be defined in the module or
-class which uses the option::
-
- opts = ...
-
- def add_common_opts(conf):
- conf.register_opts(opts)
-
- def get_bind_host(conf):
- return conf.bind_host
-
- def get_bind_port(conf):
- return conf.bind_port
-
-An option may optionally be made available via the command line. Such options
-must registered with the config manager before the command line is parsed (for
-the purposes of --help and CLI arg validation)::
-
- cli_opts = [
- cfg.BoolOpt('verbose',
- short='v',
- default=False,
- help='Print more verbose output'),
- cfg.BoolOpt('debug',
- short='d',
- default=False,
- help='Print debugging output'),
- ]
-
- def add_common_opts(conf):
- conf.register_cli_opts(cli_opts)
-
-The config manager has two CLI options defined by default, --config-file
-and --config-dir::
-
- class ConfigOpts(object):
-
- def __call__(self, ...):
-
- opts = [
- MultiStrOpt('config-file',
- ...),
- StrOpt('config-dir',
- ...),
- ]
-
- self.register_cli_opts(opts)
-
-Option values are parsed from any supplied config files using
-openstack.common.iniparser. If none are specified, a default set is used
-e.g. glance-api.conf and glance-common.conf::
-
- glance-api.conf:
- [DEFAULT]
- bind_port = 9292
-
- glance-common.conf:
- [DEFAULT]
- bind_host = 0.0.0.0
-
-Option values in config files override those on the command line. Config files
-are parsed in order, with values in later files overriding those in earlier
-files.
-
-The parsing of CLI args and config files is initiated by invoking the config
-manager e.g.::
-
- conf = ConfigOpts()
- conf.register_opt(BoolOpt('verbose', ...))
- conf(sys.argv[1:])
- if conf.verbose:
- ...
-
-Options can be registered as belonging to a group::
-
- rabbit_group = cfg.OptGroup(name='rabbit',
- title='RabbitMQ options')
-
- rabbit_host_opt = cfg.StrOpt('host',
- default='localhost',
- help='IP/hostname to listen on'),
- rabbit_port_opt = cfg.IntOpt('port',
- default=5672,
- help='Port number to listen on')
-
- def register_rabbit_opts(conf):
- conf.register_group(rabbit_group)
- # options can be registered under a group in either of these ways:
- conf.register_opt(rabbit_host_opt, group=rabbit_group)
- conf.register_opt(rabbit_port_opt, group='rabbit')
-
-If it no group attributes are required other than the group name, the group
-need not be explicitly registered e.g.
-
- def register_rabbit_opts(conf):
- # The group will automatically be created, equivalent calling::
- # conf.register_group(OptGroup(name='rabbit'))
- conf.register_opt(rabbit_port_opt, group='rabbit')
-
-If no group is specified, options belong to the 'DEFAULT' section of config
-files::
-
- glance-api.conf:
- [DEFAULT]
- bind_port = 9292
- ...
-
- [rabbit]
- host = localhost
- port = 5672
- use_ssl = False
- userid = guest
- password = guest
- virtual_host = /
-
-Command-line options in a group are automatically prefixed with the
-group name::
-
- --rabbit-host localhost --rabbit-port 9999
-
-Option values in the default group are referenced as attributes/properties on
-the config manager; groups are also attributes on the config manager, with
-attributes for each of the options associated with the group::
-
- server.start(app, conf.bind_port, conf.bind_host, conf)
-
- self.connection = kombu.connection.BrokerConnection(
- hostname=conf.rabbit.host,
- port=conf.rabbit.port,
- ...)
-
-Option values may reference other values using PEP 292 string substitution::
-
- opts = [
- cfg.StrOpt('state_path',
- default=os.path.join(os.path.dirname(__file__), '../'),
- help='Top-level directory for maintaining nova state'),
- cfg.StrOpt('sqlite_db',
- default='nova.sqlite',
- help='file name for sqlite'),
- cfg.StrOpt('sql_connection',
- default='sqlite:///$state_path/$sqlite_db',
- help='connection string for sql database'),
- ]
-
-Note that interpolation can be avoided by using '$$'.
-
-Options may be declared as required so that an error is raised if the user
-does not supply a value for the option.
-
-Options may be declared as secret so that their values are not leaked into
-log files::
-
- opts = [
- cfg.StrOpt('s3_store_access_key', secret=True),
- cfg.StrOpt('s3_store_secret_key', secret=True),
- ...
- ]
-
-This module also contains a global instance of the ConfigOpts class
-in order to support a common usage pattern in OpenStack::
-
- from nova.openstack.common import cfg
-
- opts = [
- cfg.StrOpt('bind_host', default='0.0.0.0'),
- cfg.IntOpt('bind_port', default=9292),
- ]
-
- CONF = cfg.CONF
- CONF.register_opts(opts)
-
- def start(server, app):
- server.start(app, CONF.bind_port, CONF.bind_host)
-
-Positional command line arguments are supported via a 'positional' Opt
-constructor argument::
-
- >>> conf = ConfigOpts()
- >>> conf.register_cli_opt(MultiStrOpt('bar', positional=True))
- True
- >>> conf(['a', 'b'])
- >>> conf.bar
- ['a', 'b']
-
-It is also possible to use argparse "sub-parsers" to parse additional
-command line arguments using the SubCommandOpt class:
-
- >>> def add_parsers(subparsers):
- ... list_action = subparsers.add_parser('list')
- ... list_action.add_argument('id')
- ...
- >>> conf = ConfigOpts()
- >>> conf.register_cli_opt(SubCommandOpt('action', handler=add_parsers))
- True
- >>> conf(args=['list', '10'])
- >>> conf.action.name, conf.action.id
- ('list', '10')
-
-"""
-
-import argparse
-import collections
-import copy
-import functools
-import glob
-import os
-import string
-import sys
-
-from nova.openstack.common import iniparser
-
-
-class Error(Exception):
- """Base class for cfg exceptions."""
-
- def __init__(self, msg=None):
- self.msg = msg
-
- def __str__(self):
- return self.msg
-
-
-class ArgsAlreadyParsedError(Error):
- """Raised if a CLI opt is registered after parsing."""
-
- def __str__(self):
- ret = "arguments already parsed"
- if self.msg:
- ret += ": " + self.msg
- return ret
-
-
-class NoSuchOptError(Error, AttributeError):
- """Raised if an opt which doesn't exist is referenced."""
-
- def __init__(self, opt_name, group=None):
- self.opt_name = opt_name
- self.group = group
-
- def __str__(self):
- if self.group is None:
- return "no such option: %s" % self.opt_name
- else:
- return "no such option in group %s: %s" % (self.group.name,
- self.opt_name)
-
-
-class NoSuchGroupError(Error):
- """Raised if a group which doesn't exist is referenced."""
-
- def __init__(self, group_name):
- self.group_name = group_name
-
- def __str__(self):
- return "no such group: %s" % self.group_name
-
-
-class DuplicateOptError(Error):
- """Raised if multiple opts with the same name are registered."""
-
- def __init__(self, opt_name):
- self.opt_name = opt_name
-
- def __str__(self):
- return "duplicate option: %s" % self.opt_name
-
-
-class RequiredOptError(Error):
- """Raised if an option is required but no value is supplied by the user."""
-
- def __init__(self, opt_name, group=None):
- self.opt_name = opt_name
- self.group = group
-
- def __str__(self):
- if self.group is None:
- return "value required for option: %s" % self.opt_name
- else:
- return "value required for option: %s.%s" % (self.group.name,
- self.opt_name)
-
-
-class TemplateSubstitutionError(Error):
- """Raised if an error occurs substituting a variable in an opt value."""
-
- def __str__(self):
- return "template substitution error: %s" % self.msg
-
-
-class ConfigFilesNotFoundError(Error):
- """Raised if one or more config files are not found."""
-
- def __init__(self, config_files):
- self.config_files = config_files
-
- def __str__(self):
- return ('Failed to read some config files: %s' %
- string.join(self.config_files, ','))
-
-
-class ConfigFileParseError(Error):
- """Raised if there is an error parsing a config file."""
-
- def __init__(self, config_file, msg):
- self.config_file = config_file
- self.msg = msg
-
- def __str__(self):
- return 'Failed to parse %s: %s' % (self.config_file, self.msg)
-
-
-class ConfigFileValueError(Error):
- """Raised if a config file value does not match its opt type."""
- pass
-
-
-def _fixpath(p):
- """Apply tilde expansion and absolutization to a path."""
- return os.path.abspath(os.path.expanduser(p))
-
-
-def _get_config_dirs(project=None):
- """Return a list of directors where config files may be located.
-
- :param project: an optional project name
-
- If a project is specified, following directories are returned::
-
- ~/.${project}/
- ~/
- /etc/${project}/
- /etc/
-
- Otherwise, these directories::
-
- ~/
- /etc/
- """
- cfg_dirs = [
- _fixpath(os.path.join('~', '.' + project)) if project else None,
- _fixpath('~'),
- os.path.join('/etc', project) if project else None,
- '/etc'
- ]
-
- return filter(bool, cfg_dirs)
-
-
-def _search_dirs(dirs, basename, extension=""):
- """Search a list of directories for a given filename.
-
- Iterator over the supplied directories, returning the first file
- found with the supplied name and extension.
-
- :param dirs: a list of directories
- :param basename: the filename, e.g. 'glance-api'
- :param extension: the file extension, e.g. '.conf'
- :returns: the path to a matching file, or None
- """
- for d in dirs:
- path = os.path.join(d, '%s%s' % (basename, extension))
- if os.path.exists(path):
- return path
-
-
-def find_config_files(project=None, prog=None, extension='.conf'):
- """Return a list of default configuration files.
-
- :param project: an optional project name
- :param prog: the program name, defaulting to the basename of sys.argv[0]
- :param extension: the type of the config file
-
- We default to two config files: [${project}.conf, ${prog}.conf]
-
- And we look for those config files in the following directories::
-
- ~/.${project}/
- ~/
- /etc/${project}/
- /etc/
-
- We return an absolute path for (at most) one of each the default config
- files, for the topmost directory it exists in.
-
- For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf
- and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf',
- '~/.foo/bar.conf']
-
- If no project name is supplied, we only look for ${prog.conf}.
- """
- if prog is None:
- prog = os.path.basename(sys.argv[0])
-
- cfg_dirs = _get_config_dirs(project)
-
- config_files = []
- if project:
- config_files.append(_search_dirs(cfg_dirs, project, extension))
- config_files.append(_search_dirs(cfg_dirs, prog, extension))
-
- return filter(bool, config_files)
-
-
-def _is_opt_registered(opts, opt):
- """Check whether an opt with the same name is already registered.
-
- The same opt may be registered multiple times, with only the first
- registration having any effect. However, it is an error to attempt
- to register a different opt with the same name.
-
- :param opts: the set of opts already registered
- :param opt: the opt to be registered
- :returns: True if the opt was previously registered, False otherwise
- :raises: DuplicateOptError if a naming conflict is detected
- """
- if opt.dest in opts:
- if opts[opt.dest]['opt'] != opt:
- raise DuplicateOptError(opt.name)
- return True
- else:
- return False
-
-
-def set_defaults(opts, **kwargs):
- for opt in opts:
- if opt.dest in kwargs:
- opt.default = kwargs[opt.dest]
- break
-
-
-class Opt(object):
-
- """Base class for all configuration options.
-
- An Opt object has no public methods, but has a number of public string
- properties:
-
- name:
- the name of the option, which may include hyphens
- dest:
- the (hyphen-less) ConfigOpts property which contains the option value
- short:
- a single character CLI option name
- default:
- the default value of the option
- positional:
- True if the option is a positional CLI argument
- metavar:
- the name shown as the argument to a CLI option in --help output
- help:
- an string explaining how the options value is used
- """
- multi = False
-
- def __init__(self, name, dest=None, short=None, default=None,
- positional=False, metavar=None, help=None,
- secret=False, required=False, deprecated_name=None):
- """Construct an Opt object.
-
- The only required parameter is the option's name. However, it is
- common to also supply a default and help string for all options.
-
- :param name: the option's name
- :param dest: the name of the corresponding ConfigOpts property
- :param short: a single character CLI option name
- :param default: the default value of the option
- :param positional: True if the option is a positional CLI argument
- :param metavar: the option argument to show in --help
- :param help: an explanation of how the option is used
- :param secret: true iff the value should be obfuscated in log output
- :param required: true iff a value must be supplied for this option
- :param deprecated_name: deprecated name option. Acts like an alias
- """
- self.name = name
- if dest is None:
- self.dest = self.name.replace('-', '_')
- else:
- self.dest = dest
- self.short = short
- self.default = default
- self.positional = positional
- self.metavar = metavar
- self.help = help
- self.secret = secret
- self.required = required
- if deprecated_name is not None:
- self.deprecated_name = deprecated_name.replace('-', '_')
- else:
- self.deprecated_name = None
-
- def __ne__(self, another):
- return vars(self) != vars(another)
-
- def _get_from_config_parser(self, cparser, section):
- """Retrieves the option value from a MultiConfigParser object.
-
- This is the method ConfigOpts uses to look up the option value from
- config files. Most opt types override this method in order to perform
- type appropriate conversion of the returned value.
-
- :param cparser: a ConfigParser object
- :param section: a section name
- """
- return self._cparser_get_with_deprecated(cparser, section)
-
- def _cparser_get_with_deprecated(self, cparser, section):
- """If cannot find option as dest try deprecated_name alias."""
- if self.deprecated_name is not None:
- return cparser.get(section, [self.dest, self.deprecated_name])
- return cparser.get(section, [self.dest])
-
- def _add_to_cli(self, parser, group=None):
- """Makes the option available in the command line interface.
-
- This is the method ConfigOpts uses to add the opt to the CLI interface
- as appropriate for the opt type. Some opt types may extend this method,
- others may just extend the helper methods it uses.
-
- :param parser: the CLI option parser
- :param group: an optional OptGroup object
- """
- container = self._get_argparse_container(parser, group)
- kwargs = self._get_argparse_kwargs(group)
- prefix = self._get_argparse_prefix('', group)
- self._add_to_argparse(container, self.name, self.short, kwargs, prefix,
- self.positional, self.deprecated_name)
-
- def _add_to_argparse(self, container, name, short, kwargs, prefix='',
- positional=False, deprecated_name=None):
- """Add an option to an argparse parser or group.
-
- :param container: an argparse._ArgumentGroup object
- :param name: the opt name
- :param short: the short opt name
- :param kwargs: the keyword arguments for add_argument()
- :param prefix: an optional prefix to prepend to the opt name
- :param position: whether the optional is a positional CLI argument
- :raises: DuplicateOptError if a naming confict is detected
- """
- def hyphen(arg):
- return arg if not positional else ''
-
- args = [hyphen('--') + prefix + name]
- if short:
- args.append(hyphen('-') + short)
- if deprecated_name:
- args.append(hyphen('--') + prefix + deprecated_name)
-
- try:
- container.add_argument(*args, **kwargs)
- except argparse.ArgumentError as e:
- raise DuplicateOptError(e)
-
- def _get_argparse_container(self, parser, group):
- """Returns an argparse._ArgumentGroup.
-
- :param parser: an argparse.ArgumentParser
- :param group: an (optional) OptGroup object
- :returns: an argparse._ArgumentGroup if group is given, else parser
- """
- if group is not None:
- return group._get_argparse_group(parser)
- else:
- return parser
-
- def _get_argparse_kwargs(self, group, **kwargs):
- """Build a dict of keyword arguments for argparse's add_argument().
-
- Most opt types extend this method to customize the behaviour of the
- options added to argparse.
-
- :param group: an optional group
- :param kwargs: optional keyword arguments to add to
- :returns: a dict of keyword arguments
- """
- if not self.positional:
- dest = self.dest
- if group is not None:
- dest = group.name + '_' + dest
- kwargs['dest'] = dest
- else:
- kwargs['nargs'] = '?'
- kwargs.update({'default': None,
- 'metavar': self.metavar,
- 'help': self.help, })
- return kwargs
-
- def _get_argparse_prefix(self, prefix, group):
- """Build a prefix for the CLI option name, if required.
-
- CLI options in a group are prefixed with the group's name in order
- to avoid conflicts between similarly named options in different
- groups.
-
- :param prefix: an existing prefix to append to (e.g. 'no' or '')
- :param group: an optional OptGroup object
- :returns: a CLI option prefix including the group name, if appropriate
- """
- if group is not None:
- return group.name + '-' + prefix
- else:
- return prefix
-
-
-class StrOpt(Opt):
- """
- String opts do not have their values transformed and are returned as
- str objects.
- """
- pass
-
-
-class BoolOpt(Opt):
-
- """
- Bool opts are set to True or False on the command line using --optname or
- --noopttname respectively.
-
- In config files, boolean values are case insensitive and can be set using
- 1/0, yes/no, true/false or on/off.
- """
-
- _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
- '0': False, 'no': False, 'false': False, 'off': False}
-
- def __init__(self, *args, **kwargs):
- if 'positional' in kwargs:
- raise ValueError('positional boolean args not supported')
- super(BoolOpt, self).__init__(*args, **kwargs)
-
- def _get_from_config_parser(self, cparser, section):
- """Retrieve the opt value as a boolean from ConfigParser."""
- def convert_bool(v):
- value = self._boolean_states.get(v.lower())
- if value is None:
- raise ValueError('Unexpected boolean value %r' % v)
-
- return value
-
- return [convert_bool(v) for v in
- self._cparser_get_with_deprecated(cparser, section)]
-
- def _add_to_cli(self, parser, group=None):
- """Extends the base class method to add the --nooptname option."""
- super(BoolOpt, self)._add_to_cli(parser, group)
- self._add_inverse_to_argparse(parser, group)
-
- def _add_inverse_to_argparse(self, parser, group):
- """Add the --nooptname option to the option parser."""
- container = self._get_argparse_container(parser, group)
- kwargs = self._get_argparse_kwargs(group, action='store_false')
- prefix = self._get_argparse_prefix('no', group)
- kwargs["help"] = "The inverse of --" + self.name
- self._add_to_argparse(container, self.name, None, kwargs, prefix,
- self.positional, self.deprecated_name)
-
- def _get_argparse_kwargs(self, group, action='store_true', **kwargs):
- """Extends the base argparse keyword dict for boolean options."""
-
- kwargs = super(BoolOpt, self)._get_argparse_kwargs(group, **kwargs)
-
- # metavar has no effect for BoolOpt
- if 'metavar' in kwargs:
- del kwargs['metavar']
-
- if action != 'store_true':
- action = 'store_false'
-
- kwargs['action'] = action
-
- return kwargs
-
-
-class IntOpt(Opt):
-
- """Int opt values are converted to integers using the int() builtin."""
-
- def _get_from_config_parser(self, cparser, section):
- """Retrieve the opt value as a integer from ConfigParser."""
- return [int(v) for v in self._cparser_get_with_deprecated(cparser,
- section)]
-
- def _get_argparse_kwargs(self, group, **kwargs):
- """Extends the base argparse keyword dict for integer options."""
- return super(IntOpt,
- self)._get_argparse_kwargs(group, type=int, **kwargs)
-
-
-class FloatOpt(Opt):
-
- """Float opt values are converted to floats using the float() builtin."""
-
- def _get_from_config_parser(self, cparser, section):
- """Retrieve the opt value as a float from ConfigParser."""
- return [float(v) for v in
- self._cparser_get_with_deprecated(cparser, section)]
-
- def _get_argparse_kwargs(self, group, **kwargs):
- """Extends the base argparse keyword dict for float options."""
- return super(FloatOpt, self)._get_argparse_kwargs(group,
- type=float, **kwargs)
-
-
-class ListOpt(Opt):
-
- """
- List opt values are simple string values separated by commas. The opt value
- is a list containing these strings.
- """
-
- class _StoreListAction(argparse.Action):
- """
- An argparse action for parsing an option value into a list.
- """
- def __call__(self, parser, namespace, values, option_string=None):
- if values is not None:
- values = [a.strip() for a in values.split(',')]
- setattr(namespace, self.dest, values)
-
- def _get_from_config_parser(self, cparser, section):
- """Retrieve the opt value as a list from ConfigParser."""
- return [[a.strip() for a in v.split(',')] for v in
- self._cparser_get_with_deprecated(cparser, section)]
-
- def _get_argparse_kwargs(self, group, **kwargs):
- """Extends the base argparse keyword dict for list options."""
- return Opt._get_argparse_kwargs(self,
- group,
- action=ListOpt._StoreListAction,
- **kwargs)
-
-
-class MultiStrOpt(Opt):
-
- """
- Multistr opt values are string opts which may be specified multiple times.
- The opt value is a list containing all the string values specified.
- """
- multi = True
-
- def _get_argparse_kwargs(self, group, **kwargs):
- """Extends the base argparse keyword dict for multi str options."""
- kwargs = super(MultiStrOpt, self)._get_argparse_kwargs(group)
- if not self.positional:
- kwargs['action'] = 'append'
- else:
- kwargs['nargs'] = '*'
- return kwargs
-
- def _cparser_get_with_deprecated(self, cparser, section):
- """If cannot find option as dest try deprecated_name alias."""
- if self.deprecated_name is not None:
- return cparser.get(section, [self.dest, self.deprecated_name],
- multi=True)
- return cparser.get(section, [self.dest], multi=True)
-
-
-class SubCommandOpt(Opt):
-
- """
- Sub-command options allow argparse sub-parsers to be used to parse
- additional command line arguments.
-
- The handler argument to the SubCommandOpt contructor is a callable
- which is supplied an argparse subparsers object. Use this handler
- callable to add sub-parsers.
-
- The opt value is SubCommandAttr object with the name of the chosen
- sub-parser stored in the 'name' attribute and the values of other
- sub-parser arguments available as additional attributes.
- """
-
- def __init__(self, name, dest=None, handler=None,
- title=None, description=None, help=None):
- """Construct an sub-command parsing option.
-
- This behaves similarly to other Opt sub-classes but adds a
- 'handler' argument. The handler is a callable which is supplied
- an subparsers object when invoked. The add_parser() method on
- this subparsers object can be used to register parsers for
- sub-commands.
-
- :param name: the option's name
- :param dest: the name of the corresponding ConfigOpts property
- :param title: title of the sub-commands group in help output
- :param description: description of the group in help output
- :param help: a help string giving an overview of available sub-commands
- """
- super(SubCommandOpt, self).__init__(name, dest=dest, help=help)
- self.handler = handler
- self.title = title
- self.description = description
-
- def _add_to_cli(self, parser, group=None):
- """Add argparse sub-parsers and invoke the handler method."""
- dest = self.dest
- if group is not None:
- dest = group.name + '_' + dest
-
- subparsers = parser.add_subparsers(dest=dest,
- title=self.title,
- description=self.description,
- help=self.help)
-
- if self.handler is not None:
- self.handler(subparsers)
-
-
-class OptGroup(object):
-
- """
- Represents a group of opts.
-
- CLI opts in the group are automatically prefixed with the group name.
-
- Each group corresponds to a section in config files.
-
- An OptGroup object has no public methods, but has a number of public string
- properties:
-
- name:
- the name of the group
- title:
- the group title as displayed in --help
- help:
- the group description as displayed in --help
- """
-
- def __init__(self, name, title=None, help=None):
- """Constructs an OptGroup object.
-
- :param name: the group name
- :param title: the group title for --help
- :param help: the group description for --help
- """
- self.name = name
- if title is None:
- self.title = "%s options" % title
- else:
- self.title = title
- self.help = help
-
- self._opts = {} # dict of dicts of (opt:, override:, default:)
- self._argparse_group = None
-
- def _register_opt(self, opt, cli=False):
- """Add an opt to this group.
-
- :param opt: an Opt object
- :param cli: whether this is a CLI option
- :returns: False if previously registered, True otherwise
- :raises: DuplicateOptError if a naming conflict is detected
- """
- if _is_opt_registered(self._opts, opt):
- return False
-
- self._opts[opt.dest] = {'opt': opt, 'cli': cli}
-
- return True
-
- def _unregister_opt(self, opt):
- """Remove an opt from this group.
-
- :param opt: an Opt object
- """
- if opt.dest in self._opts:
- del self._opts[opt.dest]
-
- def _get_argparse_group(self, parser):
- if self._argparse_group is None:
- """Build an argparse._ArgumentGroup for this group."""
- self._argparse_group = parser.add_argument_group(self.title,
- self.help)
- return self._argparse_group
-
- def _clear(self):
- """Clear this group's option parsing state."""
- self._argparse_group = None
-
-
-class ParseError(iniparser.ParseError):
- def __init__(self, msg, lineno, line, filename):
- super(ParseError, self).__init__(msg, lineno, line)
- self.filename = filename
-
- def __str__(self):
- return 'at %s:%d, %s: %r' % (self.filename, self.lineno,
- self.msg, self.line)
-
-
-class ConfigParser(iniparser.BaseParser):
- def __init__(self, filename, sections):
- super(ConfigParser, self).__init__()
- self.filename = filename
- self.sections = sections
- self.section = None
-
- def parse(self):
- with open(self.filename) as f:
- return super(ConfigParser, self).parse(f)
-
- def new_section(self, section):
- self.section = section
- self.sections.setdefault(self.section, {})
-
- def assignment(self, key, value):
- if not self.section:
- raise self.error_no_section()
-
- self.sections[self.section].setdefault(key, [])
- self.sections[self.section][key].append('\n'.join(value))
-
- def parse_exc(self, msg, lineno, line=None):
- return ParseError(msg, lineno, line, self.filename)
-
- def error_no_section(self):
- return self.parse_exc('Section must be started before assignment',
- self.lineno)
-
-
-class MultiConfigParser(object):
- def __init__(self):
- self.parsed = []
-
- def read(self, config_files):
- read_ok = []
-
- for filename in config_files:
- sections = {}
- parser = ConfigParser(filename, sections)
-
- try:
- parser.parse()
- except IOError:
- continue
- self.parsed.insert(0, sections)
- read_ok.append(filename)
-
- return read_ok
-
- def get(self, section, names, multi=False):
- rvalue = []
- for sections in self.parsed:
- if section not in sections:
- continue
- for name in names:
- if name in sections[section]:
- if multi:
- rvalue = sections[section][name] + rvalue
- else:
- return sections[section][name]
- if multi and rvalue != []:
- return rvalue
- raise KeyError
-
-
-class ConfigOpts(collections.Mapping):
-
- """
- Config options which may be set on the command line or in config files.
-
- ConfigOpts is a configuration option manager with APIs for registering
- option schemas, grouping options, parsing option values and retrieving
- the values of options.
- """
-
- def __init__(self):
- """Construct a ConfigOpts object."""
- self._opts = {} # dict of dicts of (opt:, override:, default:)
- self._groups = {}
-
- self._args = None
-
- self._oparser = None
- self._cparser = None
- self._cli_values = {}
- self.__cache = {}
- self._config_opts = []
-
- def _pre_setup(self, project, prog, version, usage, default_config_files):
- """Initialize a ConfigCliParser object for option parsing."""
-
- if prog is None:
- prog = os.path.basename(sys.argv[0])
-
- if default_config_files is None:
- default_config_files = find_config_files(project, prog)
-
- self._oparser = argparse.ArgumentParser(prog=prog, usage=usage)
- self._oparser.add_argument('--version',
- action='version',
- version=version)
-
- return prog, default_config_files
-
- def _setup(self, project, prog, version, usage, default_config_files):
- """Initialize a ConfigOpts object for option parsing."""
-
- self._config_opts = [
- MultiStrOpt('config-file',
- default=default_config_files,
- metavar='PATH',
- help='Path to a config file to use. Multiple config '
- 'files can be specified, with values in later '
- 'files taking precedence. The default files '
- ' used are: %s' % (default_config_files, )),
- StrOpt('config-dir',
- metavar='DIR',
- help='Path to a config directory to pull *.conf '
- 'files from. This file set is sorted, so as to '
- 'provide a predictable parse order if individual '
- 'options are over-ridden. The set is parsed after '
- 'the file(s), if any, specified via --config-file, '
- 'hence over-ridden options in the directory take '
- 'precedence.'),
- ]
- self.register_cli_opts(self._config_opts)
-
- self.project = project
- self.prog = prog
- self.version = version
- self.usage = usage
- self.default_config_files = default_config_files
-
- def __clear_cache(f):
- @functools.wraps(f)
- def __inner(self, *args, **kwargs):
- if kwargs.pop('clear_cache', True):
- self.__cache.clear()
- return f(self, *args, **kwargs)
-
- return __inner
-
- def __call__(self,
- args=None,
- project=None,
- prog=None,
- version=None,
- usage=None,
- default_config_files=None):
- """Parse command line arguments and config files.
-
- Calling a ConfigOpts object causes the supplied command line arguments
- and config files to be parsed, causing opt values to be made available
- as attributes of the object.
-
- The object may be called multiple times, each time causing the previous
- set of values to be overwritten.
-
- Automatically registers the --config-file option with either a supplied
- list of default config files, or a list from find_config_files().
-
- If the --config-dir option is set, any *.conf files from this
- directory are pulled in, after all the file(s) specified by the
- --config-file option.
-
- :param args: command line arguments (defaults to sys.argv[1:])
- :param project: the toplevel project name, used to locate config files
- :param prog: the name of the program (defaults to sys.argv[0] basename)
- :param version: the program version (for --version)
- :param usage: a usage string (%prog will be expanded)
- :param default_config_files: config files to use by default
- :returns: the list of arguments left over after parsing options
- :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError,
- RequiredOptError, DuplicateOptError
- """
-
- self.clear()
-
- prog, default_config_files = self._pre_setup(project,
- prog,
- version,
- usage,
- default_config_files)
-
- self._setup(project, prog, version, usage, default_config_files)
-
- self._cli_values = self._parse_cli_opts(args)
-
- self._parse_config_files()
-
- self._check_required_opts()
-
- def __getattr__(self, name):
- """Look up an option value and perform string substitution.
-
- :param name: the opt name (or 'dest', more precisely)
- :returns: the option value (after string subsititution) or a GroupAttr
- :raises: NoSuchOptError,ConfigFileValueError,TemplateSubstitutionError
- """
- return self._get(name)
-
- def __getitem__(self, key):
- """Look up an option value and perform string substitution."""
- return self.__getattr__(key)
-
- def __contains__(self, key):
- """Return True if key is the name of a registered opt or group."""
- return key in self._opts or key in self._groups
-
- def __iter__(self):
- """Iterate over all registered opt and group names."""
- for key in self._opts.keys() + self._groups.keys():
- yield key
-
- def __len__(self):
- """Return the number of options and option groups."""
- return len(self._opts) + len(self._groups)
-
- def reset(self):
- """Clear the object state and unset overrides and defaults."""
- self._unset_defaults_and_overrides()
- self.clear()
-
- @__clear_cache
- def clear(self):
- """Clear the state of the object to before it was called.
-
- Any subparsers added using the add_cli_subparsers() will also be
- removed as a side-effect of this method.
- """
- self._args = None
- self._cli_values.clear()
- self._oparser = argparse.ArgumentParser()
- self._cparser = None
- self.unregister_opts(self._config_opts)
- for group in self._groups.values():
- group._clear()
-
- @__clear_cache
- def register_opt(self, opt, group=None, cli=False):
- """Register an option schema.
-
- Registering an option schema makes any option value which is previously
- or subsequently parsed from the command line or config files available
- as an attribute of this object.
-
- :param opt: an instance of an Opt sub-class
- :param cli: whether this is a CLI option
- :param group: an optional OptGroup object or group name
- :return: False if the opt was already register, True otherwise
- :raises: DuplicateOptError
- """
- if group is not None:
- group = self._get_group(group, autocreate=True)
- return group._register_opt(opt, cli)
-
- if _is_opt_registered(self._opts, opt):
- return False
-
- self._opts[opt.dest] = {'opt': opt, 'cli': cli}
-
- return True
-
- @__clear_cache
- def register_opts(self, opts, group=None):
- """Register multiple option schemas at once."""
- for opt in opts:
- self.register_opt(opt, group, clear_cache=False)
-
- @__clear_cache
- def register_cli_opt(self, opt, group=None):
- """Register a CLI option schema.
-
- CLI option schemas must be registered before the command line and
- config files are parsed. This is to ensure that all CLI options are
- show in --help and option validation works as expected.
-
- :param opt: an instance of an Opt sub-class
- :param group: an optional OptGroup object or group name
- :return: False if the opt was already register, True otherwise
- :raises: DuplicateOptError, ArgsAlreadyParsedError
- """
- if self._args is not None:
- raise ArgsAlreadyParsedError("cannot register CLI option")
-
- return self.register_opt(opt, group, cli=True, clear_cache=False)
-
- @__clear_cache
- def register_cli_opts(self, opts, group=None):
- """Register multiple CLI option schemas at once."""
- for opt in opts:
- self.register_cli_opt(opt, group, clear_cache=False)
-
- def register_group(self, group):
- """Register an option group.
-
- An option group must be registered before options can be registered
- with the group.
-
- :param group: an OptGroup object
- """
- if group.name in self._groups:
- return
-
- self._groups[group.name] = copy.copy(group)
-
- @__clear_cache
- def unregister_opt(self, opt, group=None):
- """Unregister an option.
-
- :param opt: an Opt object
- :param group: an optional OptGroup object or group name
- :raises: ArgsAlreadyParsedError, NoSuchGroupError
- """
- if self._args is not None:
- raise ArgsAlreadyParsedError("reset before unregistering options")
-
- if group is not None:
- self._get_group(group)._unregister_opt(opt)
- elif opt.dest in self._opts:
- del self._opts[opt.dest]
-
- @__clear_cache
- def unregister_opts(self, opts, group=None):
- """Unregister multiple CLI option schemas at once."""
- for opt in opts:
- self.unregister_opt(opt, group, clear_cache=False)
-
- def import_opt(self, name, module_str, group=None):
- """Import an option definition from a module.
-
- Import a module and check that a given option is registered.
-
- This is intended for use with global configuration objects
- like cfg.CONF where modules commonly register options with
- CONF at module load time. If one module requires an option
- defined by another module it can use this method to explicitly
- declare the dependency.
-
- :param name: the name/dest of the opt
- :param module_str: the name of a module to import
- :param group: an option OptGroup object or group name
- :raises: NoSuchOptError, NoSuchGroupError
- """
- __import__(module_str)
- self._get_opt_info(name, group)
-
- def import_group(self, group, module_str):
- """Import an option group from a module.
-
- Import a module and check that a given option group is registered.
-
- This is intended for use with global configuration objects
- like cfg.CONF where modules commonly register options with
- CONF at module load time. If one module requires an option group
- defined by another module it can use this method to explicitly
- declare the dependency.
-
- :param group: an option OptGroup object or group name
- :param module_str: the name of a module to import
- :raises: ImportError, NoSuchGroupError
- """
- __import__(module_str)
- self._get_group(group)
-
- @__clear_cache
- def set_override(self, name, override, group=None):
- """Override an opt value.
-
- Override the command line, config file and default values of a
- given option.
-
- :param name: the name/dest of the opt
- :param override: the override value
- :param group: an option OptGroup object or group name
- :raises: NoSuchOptError, NoSuchGroupError
- """
- opt_info = self._get_opt_info(name, group)
- opt_info['override'] = override
-
- @__clear_cache
- def set_default(self, name, default, group=None):
- """Override an opt's default value.
-
- Override the default value of given option. A command line or
- config file value will still take precedence over this default.
-
- :param name: the name/dest of the opt
- :param default: the default value
- :param group: an option OptGroup object or group name
- :raises: NoSuchOptError, NoSuchGroupError
- """
- opt_info = self._get_opt_info(name, group)
- opt_info['default'] = default
-
- @__clear_cache
- def clear_override(self, name, group=None):
- """Clear an override an opt value.
-
- Clear a previously set override of the command line, config file
- and default values of a given option.
-
- :param name: the name/dest of the opt
- :param group: an option OptGroup object or group name
- :raises: NoSuchOptError, NoSuchGroupError
- """
- opt_info = self._get_opt_info(name, group)
- opt_info.pop('override', None)
-
- @__clear_cache
- def clear_default(self, name, group=None):
- """Clear an override an opt's default value.
-
- Clear a previously set override of the default value of given option.
-
- :param name: the name/dest of the opt
- :param group: an option OptGroup object or group name
- :raises: NoSuchOptError, NoSuchGroupError
- """
- opt_info = self._get_opt_info(name, group)
- opt_info.pop('default', None)
-
- def _all_opt_infos(self):
- """A generator function for iteration opt infos."""
- for info in self._opts.values():
- yield info, None
- for group in self._groups.values():
- for info in group._opts.values():
- yield info, group
-
- def _all_cli_opts(self):
- """A generator function for iterating CLI opts."""
- for info, group in self._all_opt_infos():
- if info['cli']:
- yield info['opt'], group
-
- def _unset_defaults_and_overrides(self):
- """Unset any default or override on all options."""
- for info, group in self._all_opt_infos():
- info.pop('default', None)
- info.pop('override', None)
-
- def find_file(self, name):
- """Locate a file located alongside the config files.
-
- Search for a file with the supplied basename in the directories
- which we have already loaded config files from and other known
- configuration directories.
-
- The directory, if any, supplied by the config_dir option is
- searched first. Then the config_file option is iterated over
- and each of the base directories of the config_files values
- are searched. Failing both of these, the standard directories
- searched by the module level find_config_files() function is
- used. The first matching file is returned.
-
- :param basename: the filename, e.g. 'policy.json'
- :returns: the path to a matching file, or None
- """
- dirs = []
- if self.config_dir:
- dirs.append(_fixpath(self.config_dir))
-
- for cf in reversed(self.config_file):
- dirs.append(os.path.dirname(_fixpath(cf)))
-
- dirs.extend(_get_config_dirs(self.project))
-
- return _search_dirs(dirs, name)
-
- def log_opt_values(self, logger, lvl):
- """Log the value of all registered opts.
-
- It's often useful for an app to log its configuration to a log file at
- startup for debugging. This method dumps to the entire config state to
- the supplied logger at a given log level.
-
- :param logger: a logging.Logger object
- :param lvl: the log level (e.g. logging.DEBUG) arg to logger.log()
- """
- logger.log(lvl, "*" * 80)
- logger.log(lvl, "Configuration options gathered from:")
- logger.log(lvl, "command line args: %s", self._args)
- logger.log(lvl, "config files: %s", self.config_file)
- logger.log(lvl, "=" * 80)
-
- def _sanitize(opt, value):
- """Obfuscate values of options declared secret"""
- return value if not opt.secret else '*' * len(str(value))
-
- for opt_name in sorted(self._opts):
- opt = self._get_opt_info(opt_name)['opt']
- logger.log(lvl, "%-30s = %s", opt_name,
- _sanitize(opt, getattr(self, opt_name)))
-
- for group_name in self._groups:
- group_attr = self.GroupAttr(self, self._get_group(group_name))
- for opt_name in sorted(self._groups[group_name]._opts):
- opt = self._get_opt_info(opt_name, group_name)['opt']
- logger.log(lvl, "%-30s = %s",
- "%s.%s" % (group_name, opt_name),
- _sanitize(opt, getattr(group_attr, opt_name)))
-
- logger.log(lvl, "*" * 80)
-
- def print_usage(self, file=None):
- """Print the usage message for the current program."""
- self._oparser.print_usage(file)
-
- def print_help(self, file=None):
- """Print the help message for the current program."""
- self._oparser.print_help(file)
-
- def _get(self, name, group=None):
- if isinstance(group, OptGroup):
- key = (group.name, name)
- else:
- key = (group, name)
- try:
- return self.__cache[key]
- except KeyError:
- value = self._substitute(self._do_get(name, group))
- self.__cache[key] = value
- return value
-
- def _do_get(self, name, group=None):
- """Look up an option value.
-
- :param name: the opt name (or 'dest', more precisely)
- :param group: an OptGroup
- :returns: the option value, or a GroupAttr object
- :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
- TemplateSubstitutionError
- """
- if group is None and name in self._groups:
- return self.GroupAttr(self, self._get_group(name))
-
- info = self._get_opt_info(name, group)
- opt = info['opt']
-
- if isinstance(opt, SubCommandOpt):
- return self.SubCommandAttr(self, group, opt.dest)
-
- if 'override' in info:
- return info['override']
-
- values = []
- if self._cparser is not None:
- section = group.name if group is not None else 'DEFAULT'
- try:
- value = opt._get_from_config_parser(self._cparser, section)
- except KeyError:
- pass
- except ValueError as ve:
- raise ConfigFileValueError(str(ve))
- else:
- if not opt.multi:
- # No need to continue since the last value wins
- return value[-1]
- values.extend(value)
-
- name = name if group is None else group.name + '_' + name
- value = self._cli_values.get(name)
- if value is not None:
- if not opt.multi:
- return value
-
- # argparse ignores default=None for nargs='*'
- if opt.positional and not value:
- value = opt.default
-
- return value + values
-
- if values:
- return values
-
- if 'default' in info:
- return info['default']
-
- return opt.default
-
- def _substitute(self, value):
- """Perform string template substitution.
-
- Substitute any template variables (e.g. $foo, ${bar}) in the supplied
- string value(s) with opt values.
-
- :param value: the string value, or list of string values
- :returns: the substituted string(s)
- """
- if isinstance(value, list):
- return [self._substitute(i) for i in value]
- elif isinstance(value, str):
- tmpl = string.Template(value)
- return tmpl.safe_substitute(self.StrSubWrapper(self))
- else:
- return value
-
- def _get_group(self, group_or_name, autocreate=False):
- """Looks up a OptGroup object.
-
- Helper function to return an OptGroup given a parameter which can
- either be the group's name or an OptGroup object.
-
- The OptGroup object returned is from the internal dict of OptGroup
- objects, which will be a copy of any OptGroup object that users of
- the API have access to.
-
- :param group_or_name: the group's name or the OptGroup object itself
- :param autocreate: whether to auto-create the group if it's not found
- :raises: NoSuchGroupError
- """
- group = group_or_name if isinstance(group_or_name, OptGroup) else None
- group_name = group.name if group else group_or_name
-
- if group_name not in self._groups:
- if group is not None or not autocreate:
- raise NoSuchGroupError(group_name)
-
- self.register_group(OptGroup(name=group_name))
-
- return self._groups[group_name]
-
- def _get_opt_info(self, opt_name, group=None):
- """Return the (opt, override, default) dict for an opt.
-
- :param opt_name: an opt name/dest
- :param group: an optional group name or OptGroup object
- :raises: NoSuchOptError, NoSuchGroupError
- """
- if group is None:
- opts = self._opts
- else:
- group = self._get_group(group)
- opts = group._opts
-
- if opt_name not in opts:
- raise NoSuchOptError(opt_name, group)
-
- return opts[opt_name]
-
- def _parse_config_files(self):
- """Parse the config files from --config-file and --config-dir.
-
- :raises: ConfigFilesNotFoundError, ConfigFileParseError
- """
- config_files = list(self.config_file)
-
- if self.config_dir:
- config_dir_glob = os.path.join(self.config_dir, '*.conf')
- config_files += sorted(glob.glob(config_dir_glob))
-
- config_files = [_fixpath(p) for p in config_files]
-
- self._cparser = MultiConfigParser()
-
- try:
- read_ok = self._cparser.read(config_files)
- except iniparser.ParseError as pe:
- raise ConfigFileParseError(pe.filename, str(pe))
-
- if read_ok != config_files:
- not_read_ok = filter(lambda f: f not in read_ok, config_files)
- raise ConfigFilesNotFoundError(not_read_ok)
-
- def _check_required_opts(self):
- """Check that all opts marked as required have values specified.
-
- :raises: RequiredOptError
- """
- for info, group in self._all_opt_infos():
- opt = info['opt']
-
- if opt.required:
- if 'default' in info or 'override' in info:
- continue
-
- if self._get(opt.dest, group) is None:
- raise RequiredOptError(opt.name, group)
-
- def _parse_cli_opts(self, args):
- """Parse command line options.
-
- Initializes the command line option parser and parses the supplied
- command line arguments.
-
- :param args: the command line arguments
- :returns: a dict of parsed option values
- :raises: SystemExit, DuplicateOptError
-
- """
- self._args = args
-
- for opt, group in sorted(self._all_cli_opts()):
- opt._add_to_cli(self._oparser, group)
-
- return vars(self._oparser.parse_args(args))
-
- class GroupAttr(collections.Mapping):
-
- """
- A helper class representing the option values of a group as a mapping
- and attributes.
- """
-
- def __init__(self, conf, group):
- """Construct a GroupAttr object.
-
- :param conf: a ConfigOpts object
- :param group: an OptGroup object
- """
- self._conf = conf
- self._group = group
-
- def __getattr__(self, name):
- """Look up an option value and perform template substitution."""
- return self._conf._get(name, self._group)
-
- def __getitem__(self, key):
- """Look up an option value and perform string substitution."""
- return self.__getattr__(key)
-
- def __contains__(self, key):
- """Return True if key is the name of a registered opt or group."""
- return key in self._group._opts
-
- def __iter__(self):
- """Iterate over all registered opt and group names."""
- for key in self._group._opts.keys():
- yield key
-
- def __len__(self):
- """Return the number of options and option groups."""
- return len(self._group._opts)
-
- class SubCommandAttr(object):
-
- """
- A helper class representing the name and arguments of an argparse
- sub-parser.
- """
-
- def __init__(self, conf, group, dest):
- """Construct a SubCommandAttr object.
-
- :param conf: a ConfigOpts object
- :param group: an OptGroup object
- :param dest: the name of the sub-parser
- """
- self._conf = conf
- self._group = group
- self._dest = dest
-
- def __getattr__(self, name):
- """Look up a sub-parser name or argument value."""
- if name == 'name':
- name = self._dest
- if self._group is not None:
- name = self._group.name + '_' + name
- return self._conf._cli_values[name]
-
- if name in self._conf:
- raise DuplicateOptError(name)
-
- try:
- return self._conf._cli_values[name]
- except KeyError:
- raise NoSuchOptError(name)
-
- class StrSubWrapper(object):
-
- """
- A helper class exposing opt values as a dict for string substitution.
- """
-
- def __init__(self, conf):
- """Construct a StrSubWrapper object.
-
- :param conf: a ConfigOpts object
- """
- self.conf = conf
-
- def __getitem__(self, key):
- """Look up an opt value from the ConfigOpts object.
-
- :param key: an opt name
- :returns: an opt value
- :raises: TemplateSubstitutionError if attribute is a group
- """
- value = getattr(self.conf, key)
- if isinstance(value, self.conf.GroupAttr):
- raise TemplateSubstitutionError(
- 'substituting group %s not supported' % key)
- return value
-
-
-CONF = ConfigOpts()
+from oslo.config.cfg import *
diff --git a/nova/openstack/common/db/api.py b/nova/openstack/common/db/api.py
new file mode 100644
index 000000000..edb42074f
--- /dev/null
+++ b/nova/openstack/common/db/api.py
@@ -0,0 +1,100 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Multiple DB API backend support.
+
+Supported configuration options:
+
+`db_backend`: DB backend name or full module path to DB backend module.
+`dbapi_use_tpool`: Enable thread pooling of DB API calls.
+
+A DB backend module should implement a method named 'get_backend' which
+takes no arguments. The method can return any object that implements DB
+API methods.
+
+*NOTE*: There are bugs in eventlet when using tpool combined with
+threading locks. The python logging module happens to use such locks. To
+work around this issue, be sure to specify thread=False with
+eventlet.monkey_patch().
+
+A bug for eventlet has been filed here:
+
+https://bitbucket.org/eventlet/eventlet/issue/137/
+"""
+import functools
+
+from nova.openstack.common import cfg
+from nova.openstack.common import lockutils
+from nova.openstack.common import importutils
+
+
+db_opts = [
+ cfg.StrOpt('db_backend',
+ default='sqlalchemy',
+ help='The backend to use for db'),
+ cfg.BoolOpt('dbapi_use_tpool',
+ default=False,
+ help='Enable the experimental use of thread pooling for '
+ 'all DB API calls')
+]
+
+CONF = cfg.CONF
+CONF.register_opts(db_opts)
+
+
+class DBAPI(object):
+ def __init__(self, backend_mapping=None):
+ if backend_mapping is None:
+ backend_mapping = {}
+ self.__backend = None
+ self.__backend_mapping = backend_mapping
+
+ @lockutils.synchronized('dbapi_backend', 'nova-')
+ def __get_backend(self):
+ """Get the actual backend. May be a module or an instance of
+ a class. Doesn't matter to us. We do this synchronized as it's
+ possible multiple greenthreads started very quickly trying to do
+ DB calls and eventlet can switch threads before self.__backend gets
+ assigned.
+ """
+ if self.__backend:
+ # Another thread assigned it
+ return self.__backend
+ backend_name = CONF.db_backend
+ self.__use_tpool = CONF.dbapi_use_tpool
+ if self.__use_tpool:
+ from eventlet import tpool
+ self.__tpool = tpool
+ # Import the untranslated name if we don't have a
+ # mapping.
+ backend_path = self.__backend_mapping.get(backend_name,
+ backend_name)
+ backend_mod = importutils.import_module(backend_path)
+ self.__backend = backend_mod.get_backend()
+ return self.__backend
+
+ def __getattr__(self, key):
+ backend = self.__backend or self.__get_backend()
+ attr = getattr(backend, key)
+ if not self.__use_tpool or not hasattr(attr, '__call__'):
+ return attr
+
+ def tpool_wrapper(*args, **kwargs):
+ return self.__tpool.execute(attr, *args, **kwargs)
+
+ functools.update_wrapper(tpool_wrapper, attr)
+ return tpool_wrapper
diff --git a/nova/openstack/common/db/sqlalchemy/session.py b/nova/openstack/common/db/sqlalchemy/session.py
index bc889fc36..fb86d9ca5 100644
--- a/nova/openstack/common/db/sqlalchemy/session.py
+++ b/nova/openstack/common/db/sqlalchemy/session.py
@@ -244,23 +244,18 @@ import os.path
import re
import time
-from eventlet import db_pool
from eventlet import greenthread
+from oslo.config import cfg
from sqlalchemy.exc import DisconnectionError, OperationalError, IntegrityError
import sqlalchemy.interfaces
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
-from nova.openstack.common import cfg
-from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
-MySQLdb = importutils.try_import('MySQLdb')
-if MySQLdb is not None:
- from MySQLdb.constants import CLIENT as mysql_client_constants
sql_opts = [
cfg.StrOpt('sql_connection',
@@ -303,9 +298,6 @@ sql_opts = [
cfg.BoolOpt('sql_connection_trace',
default=False,
help='Add python stack traces to SQL as comment strings'),
- cfg.BoolOpt('sql_dbpool_enable',
- default=False,
- help="enable the use of eventlet's db_pool for MySQL"),
]
CONF = cfg.CONF
@@ -517,33 +509,6 @@ def create_engine(sql_connection):
if CONF.sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
- elif all((CONF.sql_dbpool_enable, MySQLdb,
- "mysql" in connection_dict.drivername)):
- LOG.info(_("Using mysql/eventlet db_pool."))
- # MySQLdb won't accept 'None' in the password field
- password = connection_dict.password or ''
- pool_args = {
- 'db': connection_dict.database,
- 'passwd': password,
- 'host': connection_dict.host,
- 'user': connection_dict.username,
- 'min_size': CONF.sql_min_pool_size,
- 'max_size': CONF.sql_max_pool_size,
- 'max_idle': CONF.sql_idle_timeout,
- 'client_flag': mysql_client_constants.FOUND_ROWS}
-
- pool = db_pool.ConnectionPool(MySQLdb, **pool_args)
-
- def creator():
- conn = pool.create()
- if isinstance(conn, tuple):
- # NOTE(belliott) eventlet >= 0.10 returns a tuple
- now, now, conn = conn
-
- return conn
-
- engine_args['creator'] = creator
-
else:
engine_args['pool_size'] = CONF.sql_max_pool_size
if CONF.sql_max_overflow is not None:
diff --git a/nova/openstack/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py
index 118385427..61ceded43 100644
--- a/nova/openstack/common/eventlet_backdoor.py
+++ b/nova/openstack/common/eventlet_backdoor.py
@@ -24,8 +24,7 @@ import traceback
import eventlet
import eventlet.backdoor
import greenlet
-
-from nova.openstack.common import cfg
+from oslo.config import cfg
eventlet_backdoor_opts = [
cfg.IntOpt('backdoor_port',
diff --git a/nova/openstack/common/iniparser.py b/nova/openstack/common/iniparser.py
deleted file mode 100644
index 9bf399f0c..000000000
--- a/nova/openstack/common/iniparser.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-class ParseError(Exception):
- def __init__(self, message, lineno, line):
- self.msg = message
- self.line = line
- self.lineno = lineno
-
- def __str__(self):
- return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line)
-
-
-class BaseParser(object):
- lineno = 0
- parse_exc = ParseError
-
- def _assignment(self, key, value):
- self.assignment(key, value)
- return None, []
-
- def _get_section(self, line):
- if line[-1] != ']':
- return self.error_no_section_end_bracket(line)
- if len(line) <= 2:
- return self.error_no_section_name(line)
-
- return line[1:-1]
-
- def _split_key_value(self, line):
- colon = line.find(':')
- equal = line.find('=')
- if colon < 0 and equal < 0:
- return self.error_invalid_assignment(line)
-
- if colon < 0 or (equal >= 0 and equal < colon):
- key, value = line[:equal], line[equal + 1:]
- else:
- key, value = line[:colon], line[colon + 1:]
-
- value = value.strip()
- if ((value and value[0] == value[-1]) and
- (value[0] == "\"" or value[0] == "'")):
- value = value[1:-1]
- return key.strip(), [value]
-
- def parse(self, lineiter):
- key = None
- value = []
-
- for line in lineiter:
- self.lineno += 1
-
- line = line.rstrip()
- if not line:
- # Blank line, ends multi-line values
- if key:
- key, value = self._assignment(key, value)
- continue
- elif line[0] in (' ', '\t'):
- # Continuation of previous assignment
- if key is None:
- self.error_unexpected_continuation(line)
- else:
- value.append(line.lstrip())
- continue
-
- if key:
- # Flush previous assignment, if any
- key, value = self._assignment(key, value)
-
- if line[0] == '[':
- # Section start
- section = self._get_section(line)
- if section:
- self.new_section(section)
- elif line[0] in '#;':
- self.comment(line[1:].lstrip())
- else:
- key, value = self._split_key_value(line)
- if not key:
- return self.error_empty_key(line)
-
- if key:
- # Flush previous assignment, if any
- self._assignment(key, value)
-
- def assignment(self, key, value):
- """Called when a full assignment is parsed"""
- raise NotImplementedError()
-
- def new_section(self, section):
- """Called when a new section is started"""
- raise NotImplementedError()
-
- def comment(self, comment):
- """Called when a comment is parsed"""
- pass
-
- def error_invalid_assignment(self, line):
- raise self.parse_exc("No ':' or '=' found in assignment",
- self.lineno, line)
-
- def error_empty_key(self, line):
- raise self.parse_exc('Key cannot be empty', self.lineno, line)
-
- def error_unexpected_continuation(self, line):
- raise self.parse_exc('Unexpected continuation line',
- self.lineno, line)
-
- def error_no_section_end_bracket(self, line):
- raise self.parse_exc('Invalid section (must end with ])',
- self.lineno, line)
-
- def error_no_section_name(self, line):
- raise self.parse_exc('Empty section name', self.lineno, line)
diff --git a/nova/openstack/common/lockutils.py b/nova/openstack/common/lockutils.py
index 930e265f6..6ee376929 100644
--- a/nova/openstack/common/lockutils.py
+++ b/nova/openstack/common/lockutils.py
@@ -25,14 +25,13 @@ import time
import weakref
from eventlet import semaphore
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
-
LOG = logging.getLogger(__name__)
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index 32513bb32..796045657 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -40,7 +40,8 @@ import stat
import sys
import traceback
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import local
diff --git a/nova/openstack/common/notifier/api.py b/nova/openstack/common/notifier/api.py
index 0ec55fbf0..70941e243 100644
--- a/nova/openstack/common/notifier/api.py
+++ b/nova/openstack/common/notifier/api.py
@@ -15,7 +15,8 @@
import uuid
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
diff --git a/nova/openstack/common/notifier/log_notifier.py b/nova/openstack/common/notifier/log_notifier.py
index a334f92e7..4fe4d74ea 100644
--- a/nova/openstack/common/notifier/log_notifier.py
+++ b/nova/openstack/common/notifier/log_notifier.py
@@ -13,12 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
-
CONF = cfg.CONF
diff --git a/nova/openstack/common/notifier/rpc_notifier.py b/nova/openstack/common/notifier/rpc_notifier.py
index 8316f7ab7..b348480c9 100644
--- a/nova/openstack/common/notifier/rpc_notifier.py
+++ b/nova/openstack/common/notifier/rpc_notifier.py
@@ -13,8 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common import context as req_context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
diff --git a/nova/openstack/common/notifier/rpc_notifier2.py b/nova/openstack/common/notifier/rpc_notifier2.py
index beb2e9f71..f1158a2f9 100644
--- a/nova/openstack/common/notifier/rpc_notifier2.py
+++ b/nova/openstack/common/notifier/rpc_notifier2.py
@@ -15,7 +15,8 @@
'''messaging based notification driver, with message envelopes'''
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import context as req_context
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
diff --git a/nova/openstack/common/plugin/pluginmanager.py b/nova/openstack/common/plugin/pluginmanager.py
index 157ecbf44..040b721a4 100644
--- a/nova/openstack/common/plugin/pluginmanager.py
+++ b/nova/openstack/common/plugin/pluginmanager.py
@@ -15,7 +15,8 @@
import pkg_resources
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
diff --git a/nova/openstack/common/rpc/__init__.py b/nova/openstack/common/rpc/__init__.py
index b98fef006..ff72c3f8e 100644
--- a/nova/openstack/common/rpc/__init__.py
+++ b/nova/openstack/common/rpc/__init__.py
@@ -28,12 +28,12 @@ For some wrappers that add message versioning to rpc, see:
import inspect
import logging
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import local
-
LOG = logging.getLogger(__name__)
@@ -299,7 +299,7 @@ def _get_impl():
try:
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
- # For backwards compatibility with older nova config.
+ # For backwards compatibility with older oslo.config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
diff --git a/nova/openstack/common/rpc/amqp.py b/nova/openstack/common/rpc/amqp.py
index 9aadce733..3f25eed67 100644
--- a/nova/openstack/common/rpc/amqp.py
+++ b/nova/openstack/common/rpc/amqp.py
@@ -32,13 +32,27 @@ import uuid
from eventlet import greenpool
from eventlet import pools
from eventlet import semaphore
+from eventlet import queue
+# TODO(pekowski): Remove import cfg and below comment in Havana.
+# This import should no longer be needed when the amqp_rpc_single_reply_queue
+# option is removed.
+from oslo.config import cfg
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
+# TODO(pekowski): Remove this option in Havana.
+amqp_opts = [
+ cfg.BoolOpt('amqp_rpc_single_reply_queue',
+ default=False,
+ help='Enable a fast single reply queue if using AMQP based '
+ 'RPC like RabbitMQ or Qpid.'),
+]
+
+cfg.CONF.register_opts(amqp_opts)
LOG = logging.getLogger(__name__)
@@ -51,6 +65,7 @@ class Pool(pools.Pool):
kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
kwargs.setdefault("order_as_stack", True)
super(Pool, self).__init__(*args, **kwargs)
+ self.reply_proxy = None
# TODO(comstud): Timeout connections not used in a while
def create(self):
@@ -60,6 +75,16 @@ class Pool(pools.Pool):
def empty(self):
while self.free_items:
self.get().close()
+ # Force a new connection pool to be created.
+ # Note that this was added due to failing unit test cases. The issue
+ # is the above "while loop" gets all the cached connections from the
+ # pool and closes them, but never returns them to the pool, a pool
+ # leak. The unit tests hang waiting for an item to be returned to the
+ # pool. The unit tests get here via the tearDown() method. In the run
+ # time code, it gets here via cleanup() and only appears in service.py
+ # just before doing a sys.exit(), so cleanup() only happens once and
+ # the leakage is not a problem.
+ self.connection_cls.pool = None
_pool_create_sem = semaphore.Semaphore()
@@ -137,6 +162,12 @@ class ConnectionContext(rpc_common.Connection):
def create_worker(self, topic, proxy, pool_name):
self.connection.create_worker(topic, proxy, pool_name)
+ def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+ self.connection.join_consumer_pool(callback,
+ pool_name,
+ topic,
+ exchange_name)
+
def consume_in_thread(self):
self.connection.consume_in_thread()
@@ -148,8 +179,45 @@ class ConnectionContext(rpc_common.Connection):
raise rpc_common.InvalidRPCConnectionReuse()
-def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
- ending=False, log_failure=True):
+class ReplyProxy(ConnectionContext):
+ """ Connection class for RPC replies / callbacks """
+ def __init__(self, conf, connection_pool):
+ self._call_waiters = {}
+ self._num_call_waiters = 0
+ self._num_call_waiters_wrn_threshhold = 10
+ self._reply_q = 'reply_' + uuid.uuid4().hex
+ super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
+ self.declare_direct_consumer(self._reply_q, self._process_data)
+ self.consume_in_thread()
+
+ def _process_data(self, message_data):
+ msg_id = message_data.pop('_msg_id', None)
+ waiter = self._call_waiters.get(msg_id)
+ if not waiter:
+ LOG.warn(_('no calling threads waiting for msg_id : %s'
+ ', message : %s') % (msg_id, message_data))
+ else:
+ waiter.put(message_data)
+
+ def add_call_waiter(self, waiter, msg_id):
+ self._num_call_waiters += 1
+ if self._num_call_waiters > self._num_call_waiters_wrn_threshhold:
+ LOG.warn(_('Number of call waiters is greater than warning '
+ 'threshhold: %d. There could be a MulticallProxyWaiter '
+ 'leak.') % self._num_call_waiters_wrn_threshhold)
+ self._num_call_waiters_wrn_threshhold *= 2
+ self._call_waiters[msg_id] = waiter
+
+ def del_call_waiter(self, msg_id):
+ self._num_call_waiters -= 1
+ del self._call_waiters[msg_id]
+
+ def get_reply_q(self):
+ return self._reply_q
+
+
+def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
+ failure=None, ending=False, log_failure=True):
"""Sends a reply or an error on the channel signified by msg_id.
Failure should be a sys.exc_info() tuple.
@@ -168,13 +236,21 @@ def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
'failure': failure}
if ending:
msg['ending'] = True
- conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
+ # If a reply_q exists, add the msg_id to the reply and pass the
+ # reply_q to direct_send() to use it as the response queue.
+ # Otherwise use the msg_id for backward compatibility.
+ if reply_q:
+ msg['_msg_id'] = msg_id
+ conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
+ else:
+ conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call"""
def __init__(self, **kwargs):
self.msg_id = kwargs.pop('msg_id', None)
+ self.reply_q = kwargs.pop('reply_q', None)
self.conf = kwargs.pop('conf')
super(RpcContext, self).__init__(**kwargs)
@@ -182,13 +258,14 @@ class RpcContext(rpc_common.CommonRpcContext):
values = self.to_dict()
values['conf'] = self.conf
values['msg_id'] = self.msg_id
+ values['reply_q'] = self.reply_q
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False,
connection_pool=None, log_failure=True):
if self.msg_id:
- msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
- ending, log_failure)
+ msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
+ reply, failure, ending, log_failure)
if ending:
self.msg_id = None
@@ -204,6 +281,7 @@ def unpack_context(conf, msg):
value = msg.pop(key)
context_dict[key[9:]] = value
context_dict['msg_id'] = msg.pop('_msg_id', None)
+ context_dict['reply_q'] = msg.pop('_reply_q', None)
context_dict['conf'] = conf
ctx = RpcContext.from_dict(context_dict)
rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
@@ -224,15 +302,54 @@ def pack_context(msg, context):
msg.update(context_d)
-class ProxyCallback(object):
- """Calls methods on a proxy object based on method and args."""
+class _ThreadPoolWithWait(object):
+ """Base class for a delayed invocation manager used by
+ the Connection class to start up green threads
+ to handle incoming messages.
+ """
- def __init__(self, conf, proxy, connection_pool):
- self.proxy = proxy
+ def __init__(self, conf, connection_pool):
self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
self.connection_pool = connection_pool
self.conf = conf
+ def wait(self):
+ """Wait for all callback threads to exit."""
+ self.pool.waitall()
+
+
+class CallbackWrapper(_ThreadPoolWithWait):
+ """Wraps a straight callback to allow it to be invoked in a green
+ thread.
+ """
+
+ def __init__(self, conf, callback, connection_pool):
+ """
+ :param conf: cfg.CONF instance
+ :param callback: a callable (probably a function)
+ :param connection_pool: connection pool as returned by
+ get_connection_pool()
+ """
+ super(CallbackWrapper, self).__init__(
+ conf=conf,
+ connection_pool=connection_pool,
+ )
+ self.callback = callback
+
+ def __call__(self, message_data):
+ self.pool.spawn_n(self.callback, message_data)
+
+
+class ProxyCallback(_ThreadPoolWithWait):
+ """Calls methods on a proxy object based on method and args."""
+
+ def __init__(self, conf, proxy, connection_pool):
+ super(ProxyCallback, self).__init__(
+ conf=conf,
+ connection_pool=connection_pool,
+ )
+ self.proxy = proxy
+
def __call__(self, message_data):
"""Consumer callback to call a method on a proxy object.
@@ -293,11 +410,66 @@ class ProxyCallback(object):
ctxt.reply(None, sys.exc_info(),
connection_pool=self.connection_pool)
- def wait(self):
- """Wait for all callback threads to exit."""
- self.pool.waitall()
+
+class MulticallProxyWaiter(object):
+ def __init__(self, conf, msg_id, timeout, connection_pool):
+ self._msg_id = msg_id
+ self._timeout = timeout or conf.rpc_response_timeout
+ self._reply_proxy = connection_pool.reply_proxy
+ self._done = False
+ self._got_ending = False
+ self._conf = conf
+ self._dataqueue = queue.LightQueue()
+ # Add this caller to the reply proxy's call_waiters
+ self._reply_proxy.add_call_waiter(self, self._msg_id)
+
+ def put(self, data):
+ self._dataqueue.put(data)
+
+ def done(self):
+ if self._done:
+ return
+ self._done = True
+ # Remove this caller from reply proxy's call_waiters
+ self._reply_proxy.del_call_waiter(self._msg_id)
+
+ def _process_data(self, data):
+ result = None
+ if data['failure']:
+ failure = data['failure']
+ result = rpc_common.deserialize_remote_exception(self._conf,
+ failure)
+ elif data.get('ending', False):
+ self._got_ending = True
+ else:
+ result = data['result']
+ return result
+
+ def __iter__(self):
+ """Return a result until we get a reply with an 'ending' flag"""
+ if self._done:
+ raise StopIteration
+ while True:
+ try:
+ data = self._dataqueue.get(timeout=self._timeout)
+ result = self._process_data(data)
+ except queue.Empty:
+ LOG.exception(_('Timed out waiting for RPC response.'))
+ self.done()
+ raise rpc_common.Timeout()
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.done()
+ if self._got_ending:
+ self.done()
+ raise StopIteration
+ if isinstance(result, Exception):
+ self.done()
+ raise result
+ yield result
+#TODO(pekowski): Remove MulticallWaiter() in Havana.
class MulticallWaiter(object):
def __init__(self, conf, connection, timeout):
self._connection = connection
@@ -353,22 +525,40 @@ def create_connection(conf, new, connection_pool):
return ConnectionContext(conf, connection_pool, pooled=not new)
+_reply_proxy_create_sem = semaphore.Semaphore()
+
+
def multicall(conf, context, topic, msg, timeout, connection_pool):
"""Make a call that returns multiple times."""
+ # TODO(pekowski): Remove all these comments in Havana.
+ # For amqp_rpc_single_reply_queue = False,
# Can't use 'with' for multicall, as it returns an iterator
# that will continue to use the connection. When it's done,
# connection.close() will get called which will put it back into
# the pool
+ # For amqp_rpc_single_reply_queue = True,
+ # The 'with' statement is mandatory for closing the connection
LOG.debug(_('Making synchronous call on %s ...'), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_('MSG_ID is %s') % (msg_id))
pack_context(msg, context)
- conn = ConnectionContext(conf, connection_pool)
- wait_msg = MulticallWaiter(conf, conn, timeout)
- conn.declare_direct_consumer(msg_id, wait_msg)
- conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
+ # TODO(pekowski): Remove this flag and the code under the if clause
+ # in Havana.
+ if not conf.amqp_rpc_single_reply_queue:
+ conn = ConnectionContext(conf, connection_pool)
+ wait_msg = MulticallWaiter(conf, conn, timeout)
+ conn.declare_direct_consumer(msg_id, wait_msg)
+ conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
+ else:
+ with _reply_proxy_create_sem:
+ if not connection_pool.reply_proxy:
+ connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
+ msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
+ wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
+ with ConnectionContext(conf, connection_pool) as conn:
+ conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
diff --git a/nova/openstack/common/rpc/common.py b/nova/openstack/common/rpc/common.py
index d2f5a7b8f..55fc5b044 100644
--- a/nova/openstack/common/rpc/common.py
+++ b/nova/openstack/common/rpc/common.py
@@ -21,7 +21,8 @@ import copy
import sys
import traceback
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -48,8 +49,8 @@ deserialize_msg().
The current message format (version 2.0) is very simple. It is:
{
- 'nova.version': <RPC Envelope Version as a String>,
- 'nova.message': <Application Message Payload, JSON encoded>
+ 'oslo.version': <RPC Envelope Version as a String>,
+ 'oslo.message': <Application Message Payload, JSON encoded>
}
Message format version '1.0' is just considered to be the messages we sent
@@ -65,8 +66,8 @@ to the messaging libraries as a dict.
'''
_RPC_ENVELOPE_VERSION = '2.0'
-_VERSION_KEY = 'nova.version'
-_MESSAGE_KEY = 'nova.message'
+_VERSION_KEY = 'oslo.version'
+_MESSAGE_KEY = 'oslo.message'
# TODO(russellb) Turn this on after Grizzly.
@@ -196,6 +197,28 @@ class Connection(object):
"""
raise NotImplementedError()
+ def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
+ """Register as a member of a group of consumers for a given topic from
+ the specified exchange.
+
+ Exactly one member of a given pool will receive each message.
+
+ A message will be delivered to multiple pools, if more than
+ one is created.
+
+ :param callback: Callable to be invoked for each message.
+ :type callback: callable accepting one argument
+ :param pool_name: The name of the consumer pool.
+ :type pool_name: str
+ :param topic: The routing topic for desired messages.
+ :type topic: str
+ :param exchange_name: The name of the message exchange where
+ the client should attach. Defaults to
+ the configured exchange.
+ :type exchange_name: str
+ """
+ raise NotImplementedError()
+
def consume_in_thread(self):
"""Spawn a thread to handle incoming messages.
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index 4b5550d6f..5e1846f91 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -28,8 +28,8 @@ import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import network_utils
from nova.openstack.common.rpc import amqp as rpc_amqp
@@ -165,9 +165,10 @@ class ConsumerBase(object):
try:
msg = rpc_common.deserialize_msg(message.payload)
callback(msg)
- message.ack()
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
+ finally:
+ message.ack()
self.queue.consume(*args, callback=_callback, **options)
@@ -750,6 +751,30 @@ class Connection(object):
self.proxy_callbacks.append(proxy_cb)
self.declare_topic_consumer(topic, proxy_cb, pool_name)
+ def join_consumer_pool(self, callback, pool_name, topic,
+ exchange_name=None):
+ """Register as a member of a group of consumers for a given topic from
+ the specified exchange.
+
+ Exactly one member of a given pool will receive each message.
+
+ A message will be delivered to multiple pools, if more than
+ one is created.
+ """
+ callback_wrapper = rpc_amqp.CallbackWrapper(
+ conf=self.conf,
+ callback=callback,
+ connection_pool=rpc_amqp.get_connection_pool(self.conf,
+ Connection),
+ )
+ self.proxy_callbacks.append(callback_wrapper)
+ self.declare_topic_consumer(
+ queue_name=pool_name,
+ topic=topic,
+ exchange_name=exchange_name,
+ callback=callback_wrapper,
+ )
+
def create_connection(conf, new=True):
"""Create a connection"""
diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py
index 544d33790..5601ddb33 100644
--- a/nova/openstack/common/rpc/impl_qpid.py
+++ b/nova/openstack/common/rpc/impl_qpid.py
@@ -22,8 +22,8 @@ import uuid
import eventlet
import greenlet
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -560,6 +560,34 @@ class Connection(object):
return consumer
+ def join_consumer_pool(self, callback, pool_name, topic,
+ exchange_name=None):
+ """Register as a member of a group of consumers for a given topic from
+ the specified exchange.
+
+ Exactly one member of a given pool will receive each message.
+
+ A message will be delivered to multiple pools, if more than
+ one is created.
+ """
+ callback_wrapper = rpc_amqp.CallbackWrapper(
+ conf=self.conf,
+ callback=callback,
+ connection_pool=rpc_amqp.get_connection_pool(self.conf,
+ Connection),
+ )
+ self.proxy_callbacks.append(callback_wrapper)
+
+ consumer = TopicConsumer(conf=self.conf,
+ session=self.session,
+ topic=topic,
+ callback=callback_wrapper,
+ name=pool_name,
+ exchange_name=exchange_name)
+
+ self._register_consumer(consumer)
+ return consumer
+
def create_connection(conf, new=True):
"""Create a connection"""
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index d562b932f..20a7f923d 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -23,8 +23,8 @@ import uuid
import eventlet
import greenlet
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
diff --git a/nova/openstack/common/rpc/matchmaker.py b/nova/openstack/common/rpc/matchmaker.py
index 9eec1df83..57cc0b34c 100644
--- a/nova/openstack/common/rpc/matchmaker.py
+++ b/nova/openstack/common/rpc/matchmaker.py
@@ -22,7 +22,8 @@ import contextlib
import itertools
import json
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
diff --git a/nova/paths.py b/nova/paths.py
index 7405a7409..8d84289ae 100644
--- a/nova/paths.py
+++ b/nova/paths.py
@@ -19,7 +19,7 @@
import os
-from nova.openstack.common import cfg
+from oslo.config import cfg
path_opts = [
cfg.StrOpt('pybasedir',
diff --git a/nova/policy.py b/nova/policy.py
index ac2f2e730..6e500d926 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -19,8 +19,9 @@
import os.path
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import policy
from nova import utils
diff --git a/nova/quota.py b/nova/quota.py
index 1856c97c1..3361154dd 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -20,14 +20,14 @@
import datetime
+from oslo.config import cfg
+
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
-
LOG = logging.getLogger(__name__)
quota_opts = [
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index e161166fd..c639aaf80 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -23,8 +23,9 @@ Chance (Random) Scheduler implementation
import random
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.scheduler import driver
CONF = cfg.CONF
@@ -57,6 +58,12 @@ class ChanceScheduler(driver.Scheduler):
return hosts[int(random.random() * len(hosts))]
+ def select_hosts(self, context, request_spec, filter_properties):
+ """Selects a set of random hosts."""
+ return [self._schedule(context, CONF.compute_topic,
+ request_spec, filter_properties)
+ for instance_uuid in request_spec.get('instance_uuids', [])]
+
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 2565e4e40..96a3e5e98 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -23,6 +23,8 @@ Scheduler base class that all Schedulers should inherit from
import sys
+from oslo.config import cfg
+
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
@@ -30,8 +32,8 @@ from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import db
from nova import exception
+from nova.image import glance
from nova import notifications
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
@@ -121,6 +123,7 @@ class Scheduler(object):
CONF.scheduler_host_manager)
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
+ self.image_service = glance.get_default_image_service()
def update_service_capabilities(self, service_name, host, capabilities):
"""Process a capability update from a service node."""
@@ -160,6 +163,11 @@ class Scheduler(object):
msg = _("Driver must implement schedule_run_instance")
raise NotImplementedError(msg)
+ def select_hosts(self, context, request_spec, filter_properties):
+ """Must override select_hosts method for scheduler to work."""
+ msg = _("Driver must implement select_hosts")
+ raise NotImplementedError(msg)
+
def schedule_live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
"""Live migration scheduling method.
@@ -177,10 +185,33 @@ class Scheduler(object):
"""
# Check we can do live migration
self._live_migration_src_check(context, instance)
- self._live_migration_dest_check(context, instance, dest)
- self._live_migration_common_check(context, instance, dest)
- migrate_data = self.compute_rpcapi.check_can_live_migrate_destination(
- context, instance, dest, block_migration, disk_over_commit)
+
+ if dest is None:
+ # Let scheduler select a dest host, retry next best until success
+ # or no more valid hosts.
+ ignore_hosts = [instance['host']]
+ while dest is None:
+ dest = self._live_migration_dest_check(context, instance, dest,
+ ignore_hosts)
+ try:
+ self._live_migration_common_check(context, instance, dest)
+ migrate_data = self.compute_rpcapi.\
+ check_can_live_migrate_destination(context, instance,
+ dest,
+ block_migration,
+ disk_over_commit)
+ except exception.Invalid:
+ ignore_hosts.append(dest)
+ dest = None
+ continue
+ else:
+ # Test the given dest host
+ self._live_migration_dest_check(context, instance, dest)
+ self._live_migration_common_check(context, instance, dest)
+ migrate_data = self.compute_rpcapi.\
+ check_can_live_migrate_destination(context, instance, dest,
+ block_migration,
+ disk_over_commit)
# Perform migration
src = instance['host']
@@ -213,14 +244,34 @@ class Scheduler(object):
if not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=src)
- def _live_migration_dest_check(self, context, instance_ref, dest):
+ def _live_migration_dest_check(self, context, instance_ref, dest,
+ ignore_hosts=None):
"""Live migration check routine (for destination host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
+ :param ignore_hosts: hosts that should be avoided as dest host
"""
+ # If dest is not specified, have scheduler pick one.
+ if dest is None:
+ image = self.image_service.show(context, instance_ref['image_ref'])
+ request_spec = {'instance_properties': instance_ref,
+ 'instance_type': instance_ref['instance_type'],
+ 'instance_uuids': [instance_ref['uuid']],
+ 'image': image}
+ filter_properties = {'ignore_hosts': ignore_hosts}
+ return self.select_hosts(context, request_spec,
+ filter_properties)[0]
+
+ # Checking that the host where the instance is running
+ # and dest are not the same.
+ src = instance_ref['host']
+ if dest == src:
+ raise exception.UnableToMigrateToSelf(
+ instance_id=instance_ref['uuid'], host=dest)
+
# Checking dest exists and compute node.
try:
dservice_ref = db.service_get_by_compute_host(context, dest)
@@ -231,17 +282,12 @@ class Scheduler(object):
if not self.servicegroup_api.service_is_up(dservice_ref):
raise exception.ComputeServiceUnavailable(host=dest)
- # Checking whether The host where instance is running
- # and dest is not same.
- src = instance_ref['host']
- if dest == src:
- raise exception.UnableToMigrateToSelf(
- instance_id=instance_ref['uuid'], host=dest)
-
# Check memory requirements
self._assert_compute_node_has_enough_memory(context,
instance_ref, dest)
+ return dest
+
def _live_migration_common_check(self, context, instance_ref, dest):
"""Live migration common check routine.
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index c9118cb22..b886a04cc 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -21,8 +21,9 @@ Weighing Functions.
import random
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.scheduler import driver
@@ -137,6 +138,15 @@ class FilterScheduler(driver.Scheduler):
request_spec=request_spec, filter_properties=filter_properties,
node=weighed_host.obj.nodename)
+ def select_hosts(self, context, request_spec, filter_properties):
+ """Selects a filtered set of hosts."""
+ instance_uuids = request_spec.get('instance_uuids')
+ hosts = [host.obj.host for host in self._schedule(context,
+ request_spec, filter_properties, instance_uuids)]
+ if not hosts:
+ raise exception.NoValidHost(reason="")
+ return hosts
+
def _provision_resource(self, context, weighed_host, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, instance_uuid=None):
diff --git a/nova/scheduler/filters/aggregate_multitenancy_isolation.py b/nova/scheduler/filters/aggregate_multitenancy_isolation.py
new file mode 100644
index 000000000..539da37d1
--- /dev/null
+++ b/nova/scheduler/filters/aggregate_multitenancy_isolation.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2011-2013 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova.openstack.common import log as logging
+from nova.scheduler import filters
+
+LOG = logging.getLogger(__name__)
+
+
+class AggregateMultiTenancyIsolation(filters.BaseHostFilter):
+ """Isolate tenants in specific aggregates."""
+
+ def host_passes(self, host_state, filter_properties):
+ """If a host is in an aggregate that has the metadata key
+ "filter_tenant_id" it can only create instances from that tenant(s).
+ A host can be in different aggregates.
+
+ If a host doesn't belong to an aggregate with the metadata key
+ "filter_tenant_id" it can create instances from all tenants.
+ """
+ spec = filter_properties.get('request_spec', {})
+ props = spec.get('instance_properties', {})
+ tenant_id = props.get('project_id')
+
+ context = filter_properties['context'].elevated()
+ metadata = db.aggregate_metadata_get_by_host(context, host_state.host,
+ key="filter_tenant_id")
+
+ if metadata != {}:
+ if tenant_id not in metadata["filter_tenant_id"]:
+ LOG.debug(_("%(host_state)s fails tenant id on "
+ "aggregate"), locals())
+ return False
+ return True
diff --git a/nova/scheduler/filters/availability_zone_filter.py b/nova/scheduler/filters/availability_zone_filter.py
index 390276ea3..f6c7472bf 100644
--- a/nova/scheduler/filters/availability_zone_filter.py
+++ b/nova/scheduler/filters/availability_zone_filter.py
@@ -13,12 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
from nova import db
-from nova.openstack.common import cfg
from nova.scheduler import filters
-
CONF = cfg.CONF
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index 2cdfb91f4..f571955d9 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import servicegroup
diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py
index 54561b811..a07a1b39a 100644
--- a/nova/scheduler/filters/core_filter.py
+++ b/nova/scheduler/filters/core_filter.py
@@ -15,11 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.scheduler import filters
-
LOG = logging.getLogger(__name__)
cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio',
diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py
index e7a292c45..812b9b212 100644
--- a/nova/scheduler/filters/disk_filter.py
+++ b/nova/scheduler/filters/disk_filter.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py
index 2780ff252..a60be57e2 100644
--- a/nova/scheduler/filters/io_ops_filter.py
+++ b/nova/scheduler/filters/io_ops_filter.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/isolated_hosts_filter.py b/nova/scheduler/filters/isolated_hosts_filter.py
index 37a8f440d..89beb0ed0 100644
--- a/nova/scheduler/filters/isolated_hosts_filter.py
+++ b/nova/scheduler/filters/isolated_hosts_filter.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.scheduler import filters
isolated_opts = [
diff --git a/nova/scheduler/filters/num_instances_filter.py b/nova/scheduler/filters/num_instances_filter.py
index bdc350f95..f65fccbf6 100644
--- a/nova/scheduler/filters/num_instances_filter.py
+++ b/nova/scheduler/filters/num_instances_filter.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py
index f9d6bb750..3345bf386 100644
--- a/nova/scheduler/filters/ram_filter.py
+++ b/nova/scheduler/filters/ram_filter.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py
index 14f1a37b0..a7bb850bf 100644
--- a/nova/scheduler/filters/trusted_filter.py
+++ b/nova/scheduler/filters/trusted_filter.py
@@ -48,15 +48,15 @@ import httplib
import socket
import ssl
+from oslo.config import cfg
+
from nova import context
from nova import db
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.scheduler import filters
-
LOG = logging.getLogger(__name__)
trusted_opts = [
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 7203fb735..c004b2947 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -19,11 +19,12 @@ Manage hosts in the current zone.
import UserDict
+from oslo.config import cfg
+
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.scheduler import filters
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 64a388f60..a9b774a4d 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -23,6 +23,8 @@ Scheduler Service
import sys
+from oslo.config import cfg
+
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
@@ -33,9 +35,9 @@ from nova import db
from nova import exception
from nova import manager
from nova import notifications
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova import quota
@@ -56,7 +58,7 @@ QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
- RPC_API_VERSION = '2.5'
+ RPC_API_VERSION = '2.6'
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
@@ -285,3 +287,10 @@ class SchedulerManager(manager.Manager):
def get_backdoor_port(self, context):
return self.backdoor_port
+
+ def select_hosts(self, context, request_spec, filter_properties):
+ """Returns host(s) best suited for this request_spec and
+ filter_properties"""
+ hosts = self.driver.select_hosts(context, request_spec,
+ filter_properties)
+ return jsonutils.to_primitive(hosts)
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
index a92e09556..ec45ad618 100644
--- a/nova/scheduler/multi.py
+++ b/nova/scheduler/multi.py
@@ -27,7 +27,8 @@ schedule requests to compute nodes but provide their own manager and topic.
https://bugs.launchpad.net/nova/+bug/1009681
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import importutils
from nova.scheduler import driver
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index c3a37d6ad..47b1de79b 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -18,7 +18,8 @@
Client side of the scheduler manager RPC API.
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import jsonutils
import nova.openstack.common.rpc.proxy
@@ -56,6 +57,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.4 - Change update_service_capabilities()
- accepts a list of capabilities
2.5 - Add get_backdoor_port()
+ 2.6 - Add select_hosts()
'''
#
@@ -117,3 +119,9 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def get_backdoor_port(self, context, host):
return self.call(context, self.make_msg('get_backdoor_port'),
version='2.5')
+
+ def select_hosts(self, ctxt, request_spec, filter_properties):
+ return self.call(ctxt, self.make_msg('select_hosts',
+ request_spec=request_spec,
+ filter_properties=filter_properties),
+ version='2.6')
diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py
index e0840dd01..1753c897b 100644
--- a/nova/scheduler/scheduler_options.py
+++ b/nova/scheduler/scheduler_options.py
@@ -26,7 +26,8 @@ import datetime
import json
import os
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/scheduler/weights/__init__.py b/nova/scheduler/weights/__init__.py
index b979b1e55..f96ec929a 100644
--- a/nova/scheduler/weights/__init__.py
+++ b/nova/scheduler/weights/__init__.py
@@ -17,8 +17,8 @@
Scheduler host weights
"""
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.scheduler.weights import least_cost
from nova import weights
diff --git a/nova/scheduler/weights/least_cost.py b/nova/scheduler/weights/least_cost.py
index 26b9e7a8c..0e617ff42 100644
--- a/nova/scheduler/weights/least_cost.py
+++ b/nova/scheduler/weights/least_cost.py
@@ -25,8 +25,9 @@ NOTE(comstud): This is deprecated. One should use the RAMWeigher and/or
create other weight modules.
"""
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
diff --git a/nova/scheduler/weights/ram.py b/nova/scheduler/weights/ram.py
index ea4cfab38..3fc15fbdf 100644
--- a/nova/scheduler/weights/ram.py
+++ b/nova/scheduler/weights/ram.py
@@ -20,9 +20,9 @@ stacking, you can set the 'ram_weight_multiplier' option to a negative
number and the weighing has the opposite effect of the default.
"""
-from nova.openstack.common import cfg
-from nova.scheduler import weights
+from oslo.config import cfg
+from nova.scheduler import weights
ram_weight_opts = [
cfg.FloatOpt('ram_weight_multiplier',
diff --git a/nova/service.py b/nova/service.py
index 3d556a202..0aa66310a 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -29,11 +29,11 @@ import time
import eventlet
import greenlet
+from oslo.config import cfg
from nova import conductor
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index 6dc1aa6d1..2701329a0 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -17,19 +17,21 @@
"""Define APIs for the servicegroup access."""
-from nova.openstack.common import cfg
+import random
+
+from oslo.config import cfg
+
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
-import random
-
-
LOG = logging.getLogger(__name__)
_default_driver = 'db'
servicegroup_driver_opt = cfg.StrOpt('servicegroup_driver',
- default=_default_driver,
- help='The driver for servicegroup service.')
+ default=_default_driver,
+ help='The driver for servicegroup '
+ 'service (valid options are: '
+ 'db, zk, mc)')
CONF = cfg.CONF
CONF.register_opt(servicegroup_driver_opt)
@@ -40,7 +42,8 @@ class API(object):
_driver = None
_driver_name_class_mapping = {
'db': 'nova.servicegroup.drivers.db.DbDriver',
- 'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver'
+ 'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver',
+ 'mc': 'nova.servicegroup.drivers.mc.MemcachedDriver'
}
def __new__(cls, *args, **kwargs):
diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py
index 18b4b74e5..994015b34 100644
--- a/nova/servicegroup/drivers/db.py
+++ b/nova/servicegroup/drivers/db.py
@@ -14,9 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from oslo.config import cfg
+
from nova import conductor
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.servicegroup import api
diff --git a/nova/servicegroup/drivers/mc.py b/nova/servicegroup/drivers/mc.py
new file mode 100644
index 000000000..2857185d9
--- /dev/null
+++ b/nova/servicegroup/drivers/mc.py
@@ -0,0 +1,109 @@
+# Service heartbeat driver using Memcached
+# Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
+#
+# This is derived from nova/servicegroup/drivers/db.py.
+# Copyright (c) IBM 2012 Pavel Kravchenco <kpavel at il dot ibm dot com>
+# Alexey Roytman <roytman at il dot ibm dot com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo.config import cfg
+
+from nova.common import memorycache
+from nova import conductor
+from nova import context
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+from nova.servicegroup import api
+from nova import utils
+
+
+CONF = cfg.CONF
+CONF.import_opt('service_down_time', 'nova.service')
+CONF.import_opt('memcached_servers', 'nova.common.memorycache')
+
+
+LOG = logging.getLogger(__name__)
+
+
+class MemcachedDriver(api.ServiceGroupDriver):
+
+ def __init__(self, *args, **kwargs):
+ test = kwargs.get('test')
+ if not CONF.memcached_servers and not test:
+ raise RuntimeError(_('memcached_servers not defined'))
+ self.mc = memorycache.get_client()
+ self.db_allowed = kwargs.get('db_allowed', True)
+ self.conductor_api = conductor.API(use_local=self.db_allowed)
+
+ def join(self, member_id, group_id, service=None):
+ """Join the given service with its group."""
+
+ msg = _('Memcached_Driver: join new ServiceGroup member '
+ '%(member_id)s to the %(group_id)s group, '
+ 'service = %(service)s')
+ LOG.debug(msg, locals())
+ if service is None:
+ raise RuntimeError(_('service is a mandatory argument for '
+ 'Memcached based ServiceGroup driver'))
+ report_interval = service.report_interval
+ if report_interval:
+ pulse = utils.FixedIntervalLoopingCall(self._report_state, service)
+ pulse.start(interval=report_interval,
+ initial_delay=report_interval)
+ return pulse
+
+ def is_up(self, service_ref):
+ """Moved from nova.utils
+ Check whether a service is up based on last heartbeat.
+ """
+ key = "%(topic)s:%(host)s" % service_ref
+ return self.mc.get(str(key)) is not None
+
+ def get_all(self, group_id):
+ """
+ Returns ALL members of the given group
+ """
+ LOG.debug(_('Memcached_Driver: get_all members of the %s group') %
+ group_id)
+ rs = []
+ ctxt = context.get_admin_context()
+ services = self.conductor_api.service_get_all_by_topic(ctxt, group_id)
+ for service in services:
+ if self.is_up(service):
+ rs.append(service['host'])
+ return rs
+
+ def _report_state(self, service):
+ """Update the state of this service in the datastore."""
+ ctxt = context.get_admin_context()
+ try:
+ key = "%(topic)s:%(host)s" % service.service_ref
+ # memcached has data expiration time capability.
+ # set(..., time=CONF.service_down_time) uses it and
+ # reduces key-deleting code.
+ self.mc.set(str(key),
+ timeutils.utcnow(),
+ time=CONF.service_down_time)
+
+ # TODO(termie): make this pattern be more elegant.
+ if getattr(service, 'model_disconnected', False):
+ service.model_disconnected = False
+ LOG.error(_('Recovered model server connection!'))
+
+ # TODO(vish): this should probably only catch connection errors
+ except Exception: # pylint: disable=W0702
+ if not getattr(service, 'model_disconnected', False):
+ service.model_disconnected = True
+ LOG.exception(_('model server went away'))
diff --git a/nova/servicegroup/drivers/zk.py b/nova/servicegroup/drivers/zk.py
index c4e3f7b71..92b49f274 100644
--- a/nova/servicegroup/drivers/zk.py
+++ b/nova/servicegroup/drivers/zk.py
@@ -21,10 +21,10 @@ import os
import eventlet
import evzookeeper
from evzookeeper import membership
+from oslo.config import cfg
import zookeeper
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.servicegroup import api
from nova import utils
@@ -48,7 +48,7 @@ zk_driver_opts = [
]
CONF = cfg.CONF
-CONF.register_opts(zk_driver_opts, group="zk")
+CONF.register_opts(zk_driver_opts, group="zookeeper")
LOG = logging.getLogger(__name__)
diff --git a/nova/spice/__init__.py b/nova/spice/__init__.py
index 390957e27..aa283d5cc 100644
--- a/nova/spice/__init__.py
+++ b/nova/spice/__init__.py
@@ -17,7 +17,7 @@
"""Module for SPICE Proxying."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
spice_opts = [
diff --git a/nova/storage/__init__.py b/nova/storage/__init__.py
new file mode 100644
index 000000000..931ad9875
--- /dev/null
+++ b/nova/storage/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Hewlett-Packard, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/storage/linuxscsi.py b/nova/storage/linuxscsi.py
new file mode 100644
index 000000000..739092b2e
--- /dev/null
+++ b/nova/storage/linuxscsi.py
@@ -0,0 +1,139 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Generic linux scsi subsystem utilities."""
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova import utils
+
+LOG = logging.getLogger(__name__)
+
+
+def echo_scsi_command(path, content):
+ """Used to echo strings to scsi subsystem."""
+ args = ["-a", path]
+ kwargs = dict(process_input=content, run_as_root=True)
+ utils.execute('tee', *args, **kwargs)
+
+
+def rescan_hosts(hbas):
+ for hba in hbas:
+ echo_scsi_command("/sys/class/scsi_host/%s/scan"
+ % hba['host_device'], "- - -")
+
+
+def get_device_list():
+ (out, err) = utils.execute('sginfo', '-r', run_as_root=True)
+ devices = []
+ if out:
+ line = out.strip()
+ devices = line.split(" ")
+
+ return devices
+
+
+def get_device_info(device):
+ (out, err) = utils.execute('sg_scan', device, run_as_root=True)
+ dev_info = {'device': device, 'host': None,
+ 'channel': None, 'id': None, 'lun': None}
+ if out:
+ line = out.strip()
+ line = line.replace(device + ": ", "")
+ info = line.split(" ")
+
+ for item in info:
+ if '=' in item:
+ pair = item.split('=')
+ dev_info[pair[0]] = pair[1]
+ elif 'scsi' in item:
+ dev_info['host'] = item.replace('scsi', '')
+
+ return dev_info
+
+
+def _wait_for_remove(device, tries):
+ tries = tries + 1
+ LOG.debug(_("Trying (%(tries)s) to remove device %(device)s")
+ % {'tries': tries, 'device': device["device"]})
+
+ path = "/sys/bus/scsi/drivers/sd/%s:%s:%s:%s/delete"
+ echo_scsi_command(path % (device["host"], device["channel"],
+ device["id"], device["lun"]),
+ "1")
+
+ devices = get_device_list()
+ if device["device"] not in devices:
+ raise utils.LoopingCallDone()
+
+
+def remove_device(device):
+ tries = 0
+ timer = utils.FixedIntervalLoopingCall(_wait_for_remove, device, tries)
+ timer.start(interval=2).wait()
+ timer.stop()
+
+
+def find_multipath_device(device):
+ """Try and discover the multipath device for a volume."""
+ mdev = None
+ devices = []
+ out = None
+ try:
+ (out, err) = utils.execute('multipath', '-l', device,
+ run_as_root=True)
+ except exception.ProcessExecutionError as exc:
+ LOG.warn(_("Multipath call failed exit (%(code)s)")
+ % {'code': exc.exit_code})
+ return None
+
+ if out:
+ lines = out.strip()
+ lines = lines.split("\n")
+ if lines:
+ line = lines[0]
+ info = line.split(" ")
+ # device line output is different depending
+ # on /etc/multipath.conf settings.
+ if info[1][:2] == "dm":
+ mdev = "/dev/%s" % info[1]
+ elif info[2][:2] == "dm":
+ mdev = "/dev/%s" % info[2]
+
+ if mdev is None:
+ LOG.warn(_("Couldn't find multipath device %(line)s")
+ % locals())
+ return None
+
+ LOG.debug(_("Found multipath device = %(mdev)s") % locals())
+ device_lines = lines[3:]
+ for dev_line in device_lines:
+ dev_line = dev_line.strip()
+ dev_line = dev_line[3:]
+ dev_info = dev_line.split(" ")
+ if dev_line.find("policy") != -1:
+ address = dev_info[0].split(":")
+ dev = {'device': '/dev/%s' % dev_info[1],
+ 'host': address[0], 'channel': address[1],
+ 'id': address[2], 'lun': address[3]
+ }
+ devices.append(dev)
+
+ if mdev is not None:
+ info = {"device": mdev,
+ "devices": devices}
+ return info
+ return None
diff --git a/nova/test.py b/nova/test.py
index e5c11081c..e917e6c2a 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -31,6 +31,7 @@ import uuid
import eventlet
import fixtures
import mox
+from oslo.config import cfg
import stubout
import testtools
@@ -38,7 +39,6 @@ from nova import context
from nova import db
from nova.db import migration
from nova.network import manager as network_manager
-from nova.openstack.common import cfg
from nova.openstack.common.db.sqlalchemy import session
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 5e5723a08..d4ce58023 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -21,6 +21,7 @@ import copy
import uuid
import fixtures
+from oslo.config import cfg
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
@@ -29,7 +30,6 @@ from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index f8219e7a0..07780eb02 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -26,6 +26,7 @@ import string
import tempfile
import fixtures
+from oslo.config import cfg
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
@@ -40,7 +41,6 @@ from nova import db
from nova import exception
from nova.image import s3
from nova.network import api as network_api
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import test
@@ -461,7 +461,7 @@ class CloudTestCase(test.TestCase):
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
- 'ip_ranges':{'1': {'cidr_ip': u'0.0.0.0/0'},
+ 'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'}},
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index 4dcdf4e54..24d226335 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -18,13 +18,14 @@
import datetime
+from oslo.config import cfg
+
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
diff --git a/nova/tests/api/ec2/test_middleware.py b/nova/tests/api/ec2/test_middleware.py
index f50689028..05463b849 100644
--- a/nova/tests/api/ec2/test_middleware.py
+++ b/nova/tests/api/ec2/test_middleware.py
@@ -17,6 +17,7 @@
# under the License.
from lxml import etree
+from oslo.config import cfg
import webob
import webob.dec
import webob.exc
@@ -24,7 +25,6 @@ import webob.exc
from nova.api import ec2
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index 76351e489..600d25889 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -15,6 +15,7 @@
import datetime
import uuid
+from oslo.config import cfg
import webob
from nova.api.openstack import compute
@@ -23,7 +24,6 @@ from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
diff --git a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
new file mode 100644
index 000000000..f7413cad9
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
@@ -0,0 +1,245 @@
+# Copyright 2012 SINA Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.api.openstack.compute.contrib import attach_interfaces
+from nova.compute import api as compute_api
+from nova import context
+from nova import exception
+from nova.network import api as network_api
+from nova.openstack.common import jsonutils
+from nova import test
+
+import webob
+from webob import exc
+
+
+CONF = cfg.CONF
+
+FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
+
+FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
+FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
+FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
+
+FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
+FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
+FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
+
+port_data1 = {
+ "id": FAKE_PORT_ID1,
+ "network_id": FAKE_NET_ID1,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "aa:aa:aa:aa:aa:aa",
+ "fixed_ips": ["10.0.1.2"],
+ "device_id": FAKE_UUID1,
+}
+
+port_data2 = {
+ "id": FAKE_PORT_ID2,
+ "network_id": FAKE_NET_ID2,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "bb:bb:bb:bb:bb:bb",
+ "fixed_ips": ["10.0.2.2"],
+ "device_id": FAKE_UUID1,
+}
+
+port_data3 = {
+ "id": FAKE_PORT_ID3,
+ "network_id": FAKE_NET_ID3,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "bb:bb:bb:bb:bb:bb",
+ "fixed_ips": ["10.0.2.2"],
+ "device_id": '',
+}
+
+fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
+ports = [port_data1, port_data2, port_data3]
+
+
+def fake_list_ports(self, *args, **kwargs):
+ result = []
+ for port in ports:
+ if port['device_id'] == kwargs['device_id']:
+ result.append(port)
+ return {'ports': result}
+
+
+def fake_show_port(self, context, port_id, **kwargs):
+ for port in ports:
+ if port['id'] == port_id:
+ return {'port': port}
+
+
+def fake_attach_interface(self, context, instance, network_id, port_id,
+ requested_ip='192.168.1.3'):
+ if not network_id:
+ # if no network_id is given when adding a port to an instance, use the
+ # first default network.
+ network_id = fake_networks[0]
+ if not port_id:
+ port_id = ports[fake_networks.index(network_id)]['id']
+ network_info = [
+ {'bridge': 'br-100',
+ 'id': network_id,
+ 'cidr': '192.168.1.0/24',
+ 'vlan': '101',
+ 'injected': 'False',
+ 'multi_host': 'False',
+ 'bridge_interface': 'bridge_interface'
+ },
+ {'label': 'fake_network',
+ 'broadcast': '192.168.1.255',
+ 'mac': '11:22:33:11:22:33',
+ 'vif_uuid': port_id,
+ 'rxtx_cap': 0,
+ 'dns': '8.8.8.8',
+ 'dhcp_server': '192.168.1.1',
+ 'ips': {'ip': requested_ip,
+ 'enabled': 1,
+ 'netmask': '255.255.255.0',
+ 'gateway': '192.168.1.254'}
+ }
+ ]
+ return network_info
+
+
+def fake_detach_interface(self, context, instance, port_id):
+ for port in ports:
+ if port['id'] == port_id:
+ return
+ raise exception.PortNotFound(port_id=port_id)
+
+
+def fake_get_instance(self, context, instance_id):
+ return {}
+
+
+class InterfaceAttachTests(test.TestCase):
+ def setUp(self):
+ super(InterfaceAttachTests, self).setUp()
+ self.flags(quantum_auth_strategy=None)
+ self.flags(quantum_url='http://anyhost/')
+ self.flags(quantum_url_timeout=30)
+ self.stubs.Set(network_api.API, 'show_port', fake_show_port)
+ self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
+ self.stubs.Set(compute_api.API, 'get', fake_get_instance)
+ self.context = context.get_admin_context()
+ self.expected_show = {'interfaceAttachment':
+ {'net_id': FAKE_NET_ID1,
+ 'port_id': FAKE_PORT_ID1,
+ 'mac_addr': port_data1['mac_address'],
+ 'port_state': port_data1['status'],
+ 'fixed_ips': port_data1['fixed_ips'],
+ }}
+
+ def test_show(self):
+ attachments = attach_interfaces.InterfaceAttachmentController()
+ req = webob.Request.blank('/v2/fake/os-interfaces/show')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ result = attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1)
+ self.assertEqual(self.expected_show, result)
+
+ def test_show_invalid(self):
+ attachments = attach_interfaces.InterfaceAttachmentController()
+ req = webob.Request.blank('/v2/fake/os-interfaces/show')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ attachments.show, req, FAKE_UUID2, FAKE_PORT_ID1)
+
+ def test_delete(self):
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ attachments = attach_interfaces.InterfaceAttachmentController()
+ req = webob.Request.blank('/v2/fake/os-interfaces/delete')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ result = attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
+ self.assertEqual('202 Accepted', result.status)
+
+ def test_delete_interface_not_found(self):
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ attachments = attach_interfaces.InterfaceAttachmentController()
+ req = webob.Request.blank('/v2/fake/os-interfaces/delete')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ attachments.delete,
+ req,
+ FAKE_UUID1,
+ 'invalid-port-id')
+
+ def test_attach_interface_without_network_id(self):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ attachments = attach_interfaces.InterfaceAttachmentController()
+ req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ req.method = 'POST'
+ # request body intentionally empty: no port_id or network_id supplied
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ result = attachments.create(req, FAKE_UUID1, jsonutils.loads(req.body))
+ self.assertEqual(result['interfaceAttachment']['net_id'],
+ FAKE_NET_ID1)
+
+ def test_attach_interface_with_network_id(self):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ attachments = attach_interfaces.InterfaceAttachmentController()
+ req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interfaceAttachment':
+ {'net_id': FAKE_NET_ID2}})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ result = attachments.create(req, FAKE_UUID1, jsonutils.loads(req.body))
+ self.assertEqual(result['interfaceAttachment']['net_id'],
+ FAKE_NET_ID2)
+
+ def test_attach_interface_with_port_and_network_id(self):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ attachments = attach_interfaces.InterfaceAttachmentController()
+ req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interfaceAttachment':
+ {'port_id': FAKE_PORT_ID1,
+ 'net_id': FAKE_NET_ID2}})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPBadRequest,
+ attachments.create, req, FAKE_UUID1,
+ jsonutils.loads(req.body))
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
index 133554abd..40b4e6d93 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
@@ -14,12 +14,12 @@
# under the License.
from lxml import etree
+from oslo.config import cfg
from nova.api.openstack.compute.contrib import cloudpipe
from nova.api.openstack import wsgi
from nova.compute import utils as compute_utils
from nova import db
-from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/contrib/test_evacuate.py b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
index f76bf7bcf..9411b5663 100644
--- a/nova/tests/api/openstack/compute/contrib/test_evacuate.py
+++ b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
@@ -14,12 +14,12 @@
import uuid
+from oslo.config import cfg
import webob
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py b/nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py
index 408b17d48..a36bb32cf 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ip_bulk.py
@@ -14,11 +14,11 @@
# under the License.
import netaddr
+from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import floating_ips_bulk
from nova import context
-from nova.openstack.common import cfg
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/contrib/test_fping.py b/nova/tests/api/openstack/compute/contrib/test_fping.py
index 9a838162a..1a1e3214e 100644
--- a/nova/tests/api/openstack/compute/contrib/test_fping.py
+++ b/nova/tests/api/openstack/compute/contrib/test_fping.py
@@ -40,8 +40,6 @@ class FpingTest(test.TestCase):
return_servers)
self.stubs.Set(nova.db, "instance_get_by_uuid",
return_server)
- self.stubs.Set(nova.db, "instance_get_all_by_project",
- return_servers)
self.stubs.Set(nova.utils, "execute",
execute)
self.stubs.Set(fping.FpingController, "check_fping",
diff --git a/nova/tests/api/openstack/compute/contrib/test_image_size.py b/nova/tests/api/openstack/compute/contrib/test_image_size.py
new file mode 100644
index 000000000..886bccfa7
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_image_size.py
@@ -0,0 +1,130 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import image_size
+from nova.image import glance
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+NOW_API_FORMAT = "2010-10-11T10:30:22Z"
+IMAGES = [{
+ 'id': '123',
+ 'name': 'public image',
+ 'metadata': {'key1': 'value1'},
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ 'progress': 100,
+ 'minDisk': 10,
+ 'minRam': 128,
+ 'size': 12345678,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/123",
+ }],
+ },
+ {
+ 'id': '124',
+ 'name': 'queued snapshot',
+ 'updated': NOW_API_FORMAT,
+ 'created': NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 25,
+ 'minDisk': 0,
+ 'minRam': 0,
+ 'size': 87654321,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v2/fake/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/images/124",
+ }],
+ }]
+
+
+def fake_show(*args, **kwargs):
+ return IMAGES[0]
+
+
+def fake_detail(*args, **kwargs):
+ return IMAGES
+
+
+class ImageSizeTest(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-IMG-SIZE'
+
+ def setUp(self):
+ super(ImageSizeTest, self).setUp()
+ self.stubs.Set(glance.GlanceImageService, 'show', fake_show)
+ self.stubs.Set(glance.GlanceImageService, 'detail', fake_detail)
+ self.flags(osapi_compute_extension=['nova.api.openstack.compute'
+ '.contrib.image_size.Image_size'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app())
+ return res
+
+ def _get_image(self, body):
+ return jsonutils.loads(body).get('image')
+
+ def _get_images(self, body):
+ return jsonutils.loads(body).get('images')
+
+ def assertImageSize(self, image, size):
+ self.assertEqual(image.get('%s:size' % self.prefix), size)
+
+ def test_show(self):
+ url = '/v2/fake/images/1'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ image = self._get_image(res.body)
+ self.assertImageSize(image, 12345678)
+
+ def test_detail(self):
+ url = '/v2/fake/images/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ images = self._get_images(res.body)
+ self.assertImageSize(images[0], 12345678)
+ self.assertImageSize(images[1], 87654321)
+
+
+class ImageSizeXmlTest(ImageSizeTest):
+ content_type = 'application/xml'
+ prefix = '{%s}' % image_size.Image_size.namespace
+
+ def _get_image(self, body):
+ return etree.XML(body)
+
+ def _get_images(self, body):
+ return etree.XML(body).getchildren()
+
+ def assertImageSize(self, image, size):
+ self.assertEqual(int(image.get('%ssize' % self.prefix)), size)
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
index b4db5daba..8650275a7 100644
--- a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
@@ -21,6 +21,7 @@ from webob import exc
from nova.api.openstack.compute.contrib import instance_actions
from nova import db
+from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common import policy
from nova import test
@@ -98,7 +99,12 @@ class InstanceActionsTest(test.TestCase):
def test_list_actions(self):
def fake_get_actions(context, uuid):
- return self.fake_actions[uuid].values()
+ actions = []
+ for act in self.fake_actions[uuid].itervalues():
+ action = models.InstanceAction()
+ action.update(act)
+ actions.append(action)
+ return actions
self.stubs.Set(db, 'actions_get', fake_get_actions)
req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
@@ -110,10 +116,17 @@ class InstanceActionsTest(test.TestCase):
def test_get_action_with_events_allowed(self):
def fake_get_action(context, uuid, request_id):
- return self.fake_actions[uuid][request_id]
+ action = models.InstanceAction()
+ action.update(self.fake_actions[uuid][request_id])
+ return action
def fake_get_events(context, action_id):
- return self.fake_events[action_id]
+ events = []
+ for evt in self.fake_events[action_id]:
+ event = models.InstanceActionEvent()
+ event.update(evt)
+ events.append(event)
+ return events
self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
self.stubs.Set(db, 'action_events_get', fake_get_events)
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index 44d9e8af3..fef60f20b 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -19,12 +19,12 @@ import math
import netaddr
import uuid
+from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import networks_associate
from nova.api.openstack.compute.contrib import os_networks as networks
from nova import exception
-from nova.openstack.common import cfg
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py
new file mode 100644
index 000000000..e32fadbb8
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_quantum_security_groups.py
@@ -0,0 +1,639 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Nicira, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# @author: Aaron Rosen, Nicira Networks, Inc.
+
+import uuid
+
+from lxml import etree
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import security_groups
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import context
+import nova.db
+from nova import exception
+from nova.network import quantumv2
+from nova.network.quantumv2 import api as quantum_api
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack.compute.contrib import test_security_groups
+from nova.tests.api.openstack import fakes
+from quantumclient.common import exceptions as q_exc
+
+
+class TestQuantumSecurityGroupsTestCase(test.TestCase):
+ def setUp(self):
+ super(TestQuantumSecurityGroupsTestCase, self).setUp()
+ cfg.CONF.set_override('security_group_api', 'quantum')
+ self.original_client = quantumv2.get_client
+ quantumv2.get_client = get_client
+
+ def tearDown(self):
+ quantumv2.get_client = self.original_client
+ get_client()._reset()
+ super(TestQuantumSecurityGroupsTestCase, self).tearDown()
+
+
+class TestQuantumSecurityGroups(
+ test_security_groups.TestSecurityGroups,
+ TestQuantumSecurityGroupsTestCase):
+
+ def _create_sg_template(self, **kwargs):
+ sg = test_security_groups.security_group_template(**kwargs)
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ return self.controller.create(req, {'security_group': sg})
+
+ def _create_network(self):
+ body = {'network': {'name': 'net1'}}
+ quantum = get_client()
+ net = quantum.create_network(body)
+ body = {'subnet': {'network_id': net['network']['id'],
+ 'cidr': '10.0.0.0/24'}}
+ quantum.create_subnet(body)
+ return net
+
+ def _create_port(self, **kwargs):
+ body = {'port': {}}
+ fields = ['security_groups', 'device_id', 'network_id',
+ 'port_security_enabled']
+ for field in fields:
+ if field in kwargs:
+ body['port'][field] = kwargs[field]
+ quantum = get_client()
+ return quantum.create_port(body)
+
+ def test_create_security_group_with_no_description(self):
+ # Quantum's security group description field is optional.
+ pass
+
+ def test_create_security_group_with_blank_name(self):
+ # Quantum's security group name field is optional.
+ pass
+
+ def test_create_security_group_with_whitespace_name(self):
+ # Quantum allows security group name to be whitespace.
+ pass
+
+ def test_create_security_group_with_blank_description(self):
+ # Quantum's security group description field is optional.
+ pass
+
+ def test_create_security_group_with_whitespace_description(self):
+ # Quantum allows description to be whitespace.
+ pass
+
+ def test_create_security_group_with_duplicate_name(self):
+ # Quantum allows duplicate names for security groups.
+ pass
+
+ def test_create_security_group_non_string_name(self):
+ # Quantum allows security group name to be non string.
+ pass
+
+ def test_create_security_group_non_string_description(self):
+ # Quantum allows non string description.
+ pass
+
+ def test_create_security_group_quota_limit(self):
+ # Enforced by Quantum server.
+ pass
+
+ def test_get_security_group_list(self):
+ self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ list_dict = self.controller.index(req)
+ self.assertEquals(len(list_dict['security_groups']), 2)
+
+ def test_get_security_group_list_all_tenants(self):
+ pass
+
+ def test_get_security_group_by_instance(self):
+ pass
+
+ def test_get_security_group_by_instance_non_existing(self):
+ pass
+
+ def test_get_security_group_by_id(self):
+ sg = self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % sg['id'])
+ res_dict = self.controller.show(req, sg['id'])
+ expected = {'security_group': sg}
+ self.assertEquals(res_dict, expected)
+
+ def test_delete_security_group_by_id(self):
+ sg = self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
+ sg['id'])
+ self.controller.delete(req, sg['id'])
+
+ def test_delete_security_group_in_use(self):
+ sg = self._create_sg_template().get('security_group')
+ self._create_network()
+ fake_instance = {'project_id': 'fake_tenant',
+ 'availability_zone': 'zone_one',
+ 'security_groups': [],
+ 'uuid': str(uuid.uuid4()),
+ 'display_name': 'test_instance'}
+ quantum = quantum_api.API()
+ quantum.allocate_for_instance(context.get_admin_context(),
+ fake_instance,
+ security_groups=[sg['id']])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % sg['id'])
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, sg['id'])
+
+ def test_associate_non_running_instance(self):
+ # Quantum does not care if the instance is running or not. When the
+ # instance is detected by quantum it will push down the security
+ # group policy to it.
+ pass
+
+ def test_associate_already_associated_security_group_to_instance(self):
+ # Quantum security groups do not raise an error if you update a
+ # port adding a security group to it that was already associated
+ # to the port. This is because PUT semantics are used.
+ pass
+
+ def test_associate(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ device_id=test_security_groups.FAKE_UUID)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ test_security_groups.return_server_by_uuid)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_disassociate_by_non_existing_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(removeSecurityGroup=dict(name='non-existing'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_non_running_instance(self):
+ # Quantum does not care if the instance is running or not. When the
+ # instance is detected by quantum it will push down the security
+ # group policy to it.
+ pass
+
+ def test_disassociate_already_associated_security_group_to_instance(self):
+ # Quantum security groups do not raise an error if you update a
+ # port adding a security group to it that was already associated
+ # to the port. This is because PUT semantics are used.
+ pass
+
+ def test_disassociate(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ device_id=test_security_groups.FAKE_UUID)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ test_security_groups.return_server_by_uuid)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._removeSecurityGroup(req, '1', body)
+
+
+class TestQuantumSecurityGroupRulesTestCase(TestQuantumSecurityGroupsTestCase):
+ def setUp(self):
+ super(TestQuantumSecurityGroupRulesTestCase, self).setUp()
+ id1 = '11111111-1111-1111-1111-111111111111'
+ sg_template1 = test_security_groups.security_group_template(
+ security_group_rules=[], id=id1)
+ id2 = '22222222-2222-2222-2222-222222222222'
+ sg_template2 = test_security_groups.security_group_template(
+ security_group_rules=[], id=id2)
+ quantum = get_client()
+ quantum._fake_security_groups[id1] = sg_template1
+ quantum._fake_security_groups[id2] = sg_template2
+
+ def tearDown(self):
+ quantumv2.get_client = self.original_client
+ get_client()._reset()
+ super(TestQuantumSecurityGroupsTestCase, self).tearDown()
+
+
+class TestQuantumSecurityGroupRules(
+ test_security_groups.TestSecurityGroupRules,
+ TestQuantumSecurityGroupRulesTestCase):
+
+ def test_create_add_existing_rules_by_cidr(self):
+ # Enforced by quantum
+ pass
+
+ def test_create_add_existing_rules_by_group_id(self):
+ # Enforced by quantum
+ pass
+
+ def test_delete(self):
+ rule = test_security_groups.security_group_rule_template(
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+ % security_group_rule['id'])
+ self.controller.delete(req, security_group_rule['id'])
+
+ def test_create_rule_quota_limit(self):
+ # Enforced by quantum
+ pass
+
+
+class TestQuantumSecurityGroupsXMLDeserializer(
+ test_security_groups.TestSecurityGroupXMLDeserializer,
+ TestQuantumSecurityGroupsTestCase):
+ pass
+
+
+class TestQuantumSecurityGroupsXMLSerializer(
+ test_security_groups.TestSecurityGroupXMLSerializer,
+ TestQuantumSecurityGroupsTestCase):
+ pass
+
+
+class TestQuantumSecurityGroupsOutputTest(TestQuantumSecurityGroupsTestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(TestQuantumSecurityGroupsOutputTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.controller = security_groups.SecurityGroupController()
+ self.stubs.Set(compute.api.API, 'get',
+ test_security_groups.fake_compute_get)
+ self.stubs.Set(compute.api.API, 'create',
+ test_security_groups.fake_compute_create)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Security_groups'])
+
+ def _make_request(self, url, body=None):
+ req = webob.Request.blank(url)
+ if body:
+ req.method = 'POST'
+ req.body = self._encode_body(body)
+ req.content_type = self.content_type
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+ def _encode_body(self, body):
+ return jsonutils.dumps(body)
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def _get_groups(self, server):
+ return server.get('security_groups')
+
+ def test_create(self):
+ url = '/v2/fake/servers'
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
+ for security_group in security_groups:
+ sg = test_security_groups.security_group_template(
+ name=security_group['name'])
+ self.controller.create(req, {'security_group': sg})
+
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
+ security_groups=security_groups)
+ res = self._make_request(url, {'server': server})
+ self.assertEqual(res.status_int, 202)
+ server = self._get_server(res.body)
+ for i, group in enumerate(self._get_groups(server)):
+ name = 'fake-2-%s' % i
+ self.assertEqual(group.get('name'), name)
+
+ def test_show(self):
+ url = '/v2/fake/servers'
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
+ for security_group in security_groups:
+ sg = test_security_groups.security_group_template(
+ name=security_group['name'])
+ self.controller.create(req, {'security_group': sg})
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
+ security_groups=security_groups)
+
+ res = self._make_request(url, {'server': server})
+ self.assertEqual(res.status_int, 202)
+ server = self._get_server(res.body)
+ for i, group in enumerate(self._get_groups(server)):
+ name = 'fake-2-%s' % i
+ self.assertEqual(group.get('name'), name)
+
+ def test_detail(self):
+ url = '/v2/fake/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ for j, group in enumerate(self._get_groups(server)):
+ name = 'fake-%s-%s' % (i, j)
+ self.assertEqual(group.get('name'), name)
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class TestQuantumSecurityGroupsOutputXMLTest(
+ TestQuantumSecurityGroupsOutputTest):
+
+ content_type = 'application/xml'
+
+ class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('server', selector='server')
+ root.set('name')
+ root.set('id')
+ root.set('imageRef')
+ root.set('flavorRef')
+ elem = xmlutil.SubTemplateElement(root, 'security_groups')
+ sg = xmlutil.SubTemplateElement(elem, 'security_group',
+ selector='security_groups')
+ sg.set('name')
+ return xmlutil.MasterTemplate(root, 1,
+ nsmap={None: xmlutil.XMLNS_V11})
+
+ def _encode_body(self, body):
+ serializer = self.MinimalCreateServerTemplate()
+ return serializer.serialize(body)
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
+
+ def _get_groups(self, server):
+ # NOTE(vish): we are adding security groups without an extension
+ # namespace so we don't break people using the existing
+ # functionality, but that means we need to use find with
+ # the existing server namespace.
+ namespace = server.nsmap[None]
+ return server.find('{%s}security_groups' % namespace).getchildren()
+
+
+def get_client(context=None, admin=False):
+ return MockClient()
+
+
+class MockClient(object):
+
+ # Needs to be global to survive multiple calls to get_client.
+ _fake_security_groups = {}
+ _fake_ports = {}
+ _fake_networks = {}
+ _fake_subnets = {}
+ _fake_security_group_rules = {}
+
+ def __init__(self):
+ # add default security group
+ if not len(self._fake_security_groups):
+ ret = {'name': 'default', 'description': 'default',
+ 'tenant_id': 'fake_tenant', 'security_group_rules': [],
+ 'id': str(uuid.uuid4())}
+ self._fake_security_groups[ret['id']] = ret
+
+ def _reset(self):
+ self._fake_security_groups.clear()
+ self._fake_ports.clear()
+ self._fake_networks.clear()
+ self._fake_subnets.clear()
+ self._fake_security_group_rules.clear()
+
+ def create_security_group(self, body=None):
+ s = body.get('security_group')
+ if len(s.get('name')) > 255 or len(s.get('description')) > 255:
+ msg = 'Security Group name or description greater than 255'
+ raise q_exc.QuantumClientException(message=msg, status_code=401)
+ ret = {'name': s.get('name'), 'description': s.get('description'),
+ 'tenant_id': 'fake_tenant', 'security_group_rules': [],
+ 'id': str(uuid.uuid4())}
+
+ self._fake_security_groups[ret['id']] = ret
+ return {'security_group': ret}
+
+ def create_network(self, body):
+ n = body.get('network')
+ ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
+ 'admin_state_up': n.get('admin_state_up', True),
+ 'tenant_id': 'fake_tenant',
+ 'port_security_enabled': n.get('port_security_enabled', True),
+ 'id': str(uuid.uuid4())}
+ self._fake_networks[ret['id']] = ret
+ return {'network': ret}
+
+ def create_subnet(self, body):
+ s = body.get('subnet')
+ try:
+ net = self._fake_networks[s.get('network_id')]
+ except KeyError:
+ msg = 'Network %s not found' % s.get('network_id')
+ raise q_exc.QuantumClientException(message=msg, status_code=404)
+ ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
+ 'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
+ 'id': str(uuid.uuid4()), 'gateway_ip': '10.0.0.1'}
+ net['subnets'].append(ret['id'])
+ self._fake_networks[net['id']] = net
+ self._fake_subnets[ret['id']] = ret
+ return {'subnet': ret}
+
+ def create_port(self, body):
+ p = body.get('port')
+ ret = {'status': 'ACTIVE', 'id': str(uuid.uuid4()),
+ 'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
+ 'port_security_enabled': p.get('port_security_enabled'),
+ 'device_owner': str(uuid.uuid4())}
+
+ fields = ['network_id', 'security_groups', 'admin_state_up']
+ for field in fields:
+ ret[field] = p.get(field)
+
+ network = self._fake_networks[p['network_id']]
+ if not ret['port_security_enabled']:
+ ret['port_security_enabled'] = network['port_security_enabled']
+ if network['subnets']:
+ ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
+ 'ip_address': '10.0.0.1'}]
+ if not ret['security_groups']:
+ for security_group in self._fake_security_groups.values():
+ if security_group['name'] == 'default':
+ ret['security_groups'] = [security_group['id']]
+ break
+ self._fake_ports[ret['id']] = ret
+ return {'port': ret}
+
+ def create_security_group_rule(self, body):
+ # does not handle bulk case so just picks rule[0]
+ r = body.get('security_group_rules')[0]
+ fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
+ 'ethertype', 'source_ip_prefix', 'tenant_id',
+ 'security_group_id', 'source_group_id']
+ ret = {}
+ for field in fields:
+ ret[field] = r.get(field)
+ ret['id'] = str(uuid.uuid4())
+ self._fake_security_group_rules[ret['id']] = ret
+ return {'security_group_rules': [ret]}
+
+ def show_security_group(self, security_group, **_params):
+ try:
+ return {'security_group':
+ self._fake_security_groups[security_group]}
+ except KeyError:
+ msg = 'Security Group %s not found' % security_group
+ raise q_exc.QuantumClientException(message=msg, status_code=404)
+
+ def show_security_group_rule(self, security_group_rule, **_params):
+ try:
+ return {'security_group_rule':
+ self._fake_security_group_rules[security_group_rule]}
+ except KeyError:
+ msg = 'Security Group rule %s not found' % security_group_rule
+ raise q_exc.QuantumClientException(message=msg, status_code=404)
+
+ def show_network(self, network, **_params):
+ try:
+ return {'network':
+ self._fake_networks[network]}
+ except KeyError:
+ msg = 'Network %s not found' % network
+ raise q_exc.QuantumClientException(message=msg, status_code=404)
+
+ def show_port(self, port, **_params):
+ try:
+ return {'port':
+ self._fake_ports[port]}
+ except KeyError:
+ msg = 'Port %s not found' % port
+ raise q_exc.QuantumClientException(message=msg, status_code=404)
+
+ def show_subnet(self, subnet, **_params):
+ try:
+ return {'subnet':
+ self._fake_subnets[subnet]}
+ except KeyError:
+ msg = 'Subnet %s not found' % subnet
+ raise q_exc.QuantumClientException(message=msg, status_code=404)
+
+ def list_security_groups(self, **_params):
+ ret = []
+ for security_group in self._fake_security_groups.values():
+ if _params.get('name'):
+ if security_group.get('name') == _params['name']:
+ ret.append(security_group)
+ else:
+ ret.append(security_group)
+ return {'security_groups': ret}
+
+ def list_networks(self, **_params):
+ return {'networks':
+ [network for network in self._fake_networks.values()]}
+
+ def list_ports(self, **_params):
+ return {'ports':
+ [port for port in self._fake_ports.values()]}
+
+ def list_subnets(self, **_params):
+ return {'subnets':
+ [subnet for subnet in self._fake_subnets.values()]}
+
+ def delete_security_group(self, security_group):
+ self.show_security_group(security_group)
+ ports = self.list_ports()
+ for port in ports.get('ports'):
+ for sg_port in port['security_groups']:
+ if sg_port == security_group:
+ msg = ('Unable to delete Security group %s in use'
+ % security_group)
+ raise q_exc.QuantumClientException(message=msg,
+ status_code=409)
+ del self._fake_security_groups[security_group]
+
+ def delete_security_group_rule(self, security_group_rule):
+ self.show_security_group_rule(security_group_rule)
+ del self._fake_security_group_rules[security_group_rule]
+
+ def delete_network(self, network):
+ self.show_network(network)
+ self._check_ports_on_network(network)
+ for subnet in self._fake_subnets.values():
+ if subnet['network_id'] == network:
+ del self._fake_subnets[subnet['id']]
+ del self._fake_networks[network]
+
+ def delete_subnet(self, subnet):
+ subnet = self.show_subnet(subnet).get('subnet')
+ self._check_ports_on_network(subnet['network_id'])
+ del self._fake_subnets[subnet['id']]
+
+ def delete_port(self, port):
+ self.show_port(port)
+ del self._fake_ports[port]
+
+ def update_port(self, port, body=None):
+ self.show_port(port)
+ self._fake_ports[port].update(body['port'])
+ return {'port': self._fake_ports[port]}
+
+ def list_extensions(self, **_parms):
+ return {'extensions': []}
+
+ def _check_ports_on_network(self, network):
+ ports = self.list_ports()
+ for port in ports.get('ports'):
+ if port['network_id'] == network:
+ msg = ('Unable to complete operation on network %s. There is '
+ 'one or more ports still in use on the network'
+ % network)
+ raise q_exc.QuantumClientException(message=msg, status_code=409)
diff --git a/nova/tests/api/openstack/compute/contrib/test_rescue.py b/nova/tests/api/openstack/compute/contrib/test_rescue.py
index 2719c1339..359eafb2a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_rescue.py
+++ b/nova/tests/api/openstack/compute/contrib/test_rescue.py
@@ -12,11 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
import webob
from nova import compute
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py b/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py
new file mode 100644
index 000000000..2b4145764
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py
@@ -0,0 +1,467 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Metacloud, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import security_group_default_rules
+from nova.api.openstack import wsgi
+from nova import context
+import nova.db
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+CONF = cfg.CONF
+
+
+class AttrDict(dict):
+ def __getattr__(self, k):
+ return self[k]
+
+
+def security_group_default_rule_template(**kwargs):
+ rule = kwargs.copy()
+ rule.setdefault('ip_protocol', 'TCP')
+ rule.setdefault('from_port', 22)
+ rule.setdefault('to_port', 22)
+ rule.setdefault('cidr', '10.10.10.0/24')
+ return rule
+
+
+def security_group_default_rule_db(security_group_default_rule, id=None):
+ attrs = security_group_default_rule.copy()
+ if id is not None:
+ attrs['id'] = id
+ return AttrDict(attrs)
+
+
+class TestSecurityGroupDefaultRules(test.TestCase):
+ def setUp(self):
+ super(TestSecurityGroupDefaultRules, self).setUp()
+ self.controller = \
+ security_group_default_rules.SecurityGroupDefaultRulesController()
+
+ def test_create_security_group_default_rule(self):
+ sgr = security_group_default_rule_template()
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ sgr_dict = dict(security_group_default_rule=sgr)
+ res_dict = self.controller.create(req, sgr_dict)
+ security_group_default_rule = res_dict['security_group_default_rule']
+ self.assertEqual(security_group_default_rule['ip_protocol'],
+ sgr['ip_protocol'])
+ self.assertEqual(security_group_default_rule['from_port'],
+ sgr['from_port'])
+ self.assertEqual(security_group_default_rule['to_port'],
+ sgr['to_port'])
+ self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+ sgr['cidr'])
+
+ def test_create_security_group_default_rule_with_no_to_port(self):
+ sgr = security_group_default_rule_template()
+ del sgr['to_port']
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_no_from_port(self):
+ sgr = security_group_default_rule_template()
+ del sgr['from_port']
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_no_ip_protocol(self):
+ sgr = security_group_default_rule_template()
+ del sgr['ip_protocol']
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_no_cidr(self):
+ sgr = security_group_default_rule_template()
+ del sgr['cidr']
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ res_dict = self.controller.create(req,
+ {'security_group_default_rule': sgr})
+ security_group_default_rule = res_dict['security_group_default_rule']
+ self.assertNotEquals(security_group_default_rule['id'], 0)
+ self.assertEquals(security_group_default_rule['ip_range']['cidr'],
+ '0.0.0.0/0')
+
+ def test_create_security_group_default_rule_with_blank_to_port(self):
+ sgr = security_group_default_rule_template(to_port='')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_blank_from_port(self):
+ sgr = security_group_default_rule_template(from_port='')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_blank_ip_protocol(self):
+ sgr = security_group_default_rule_template(ip_protocol='')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_blank_cidr(self):
+ sgr = security_group_default_rule_template(cidr='')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ res_dict = self.controller.create(req,
+ {'security_group_default_rule': sgr})
+ security_group_default_rule = res_dict['security_group_default_rule']
+ self.assertNotEquals(security_group_default_rule['id'], 0)
+ self.assertEquals(security_group_default_rule['ip_range']['cidr'],
+ '0.0.0.0/0')
+
+ def test_create_security_group_default_rule_non_numerical_to_port(self):
+ sgr = security_group_default_rule_template(to_port='invalid')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_non_numerical_from_port(self):
+ sgr = security_group_default_rule_template(from_port='invalid')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_invalid_ip_protocol(self):
+ sgr = security_group_default_rule_template(ip_protocol='invalid')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_invalid_cidr(self):
+ sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_invalid_to_port(self):
+ sgr = security_group_default_rule_template(to_port='666666')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_invalid_from_port(self):
+ sgr = security_group_default_rule_template(from_port='666666')
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_create_security_group_default_rule_with_no_body(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, None)
+
+ def test_create_duplicate_security_group_default_rule(self):
+ sgr = security_group_default_rule_template()
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.controller.create(req, {'security_group_default_rule': sgr})
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+ def test_security_group_default_rules_list(self):
+ self.test_create_security_group_default_rule()
+ rules = [dict(id=1,
+ ip_protocol='TCP',
+ from_port=22,
+ to_port=22,
+ ip_range=dict(cidr='10.10.10.0/24'))]
+ expected = {'security_group_default_rules': rules}
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, expected)
+
+ def test_default_security_group_default_rule_show(self):
+ sgr = security_group_default_rule_template(id=1)
+
+ self.test_create_security_group_default_rule()
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ res_dict = self.controller.show(req, '1')
+
+ security_group_default_rule = res_dict['security_group_default_rule']
+
+ self.assertEqual(security_group_default_rule['ip_protocol'],
+ sgr['ip_protocol'])
+ self.assertEqual(security_group_default_rule['to_port'],
+ sgr['to_port'])
+ self.assertEqual(security_group_default_rule['from_port'],
+ sgr['from_port'])
+ self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+ sgr['cidr'])
+
+ def test_delete_security_group_default_rule(self):
+ sgr = security_group_default_rule_template(id=1)
+
+ self.test_create_security_group_default_rule()
+
+ self.called = False
+
+ def security_group_default_rule_destroy(context, id):
+ self.called = True
+
+ def return_security_group_default_rule(context, id):
+ self.assertEquals(sgr['id'], id)
+ return security_group_default_rule_db(sgr)
+
+ self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
+ security_group_default_rule_destroy)
+ self.stubs.Set(nova.db, 'security_group_default_rule_get',
+ return_security_group_default_rule)
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.controller.delete(req, '1')
+
+ self.assertTrue(self.called)
+
+ def test_security_group_ensure_default(self):
+ sgr = security_group_default_rule_template(id=1)
+ self.test_create_security_group_default_rule()
+
+ ctxt = context.get_admin_context()
+
+ setattr(ctxt, 'project_id', 'new_project_id')
+
+ _, sg = nova.db.security_group_ensure_default(ctxt)
+ rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
+ security_group_rule = rules[0]
+ self.assertEqual(sgr['id'], security_group_rule.id)
+ self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
+ self.assertEqual(sgr['from_port'], security_group_rule.from_port)
+ self.assertEqual(sgr['to_port'], security_group_rule.to_port)
+ self.assertEqual(sgr['cidr'], security_group_rule.cidr)
+
+
+class TestSecurityGroupDefaultRulesXMLDeserializer(test.TestCase):
+ def setUp(self):
+ super(TestSecurityGroupDefaultRulesXMLDeserializer, self).setUp()
+ deserializer = security_group_default_rules.\
+ SecurityGroupDefaultRulesXMLDeserializer()
+ self.deserializer = deserializer
+
+ def test_create_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <from_port>22</from_port>
+ <to_port>22</to_port>
+ <ip_protocol>TCP</ip_protocol>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "from_port": "22",
+ "to_port": "22",
+ "ip_protocol": "TCP",
+ "cidr": "10.10.10.0/24"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_to_port_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <from_port>22</from_port>
+ <ip_protocol>TCP</ip_protocol>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "from_port": "22",
+ "ip_protocol": "TCP",
+ "cidr": "10.10.10.0/24"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_from_port_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <to_port>22</to_port>
+ <ip_protocol>TCP</ip_protocol>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "to_port": "22",
+ "ip_protocol": "TCP",
+ "cidr": "10.10.10.0/24"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_ip_protocol_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <from_port>22</from_port>
+ <to_port>22</to_port>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "from_port": "22",
+ "to_port": "22",
+ "cidr": "10.10.10.0/24"
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_no_cidr_request(self):
+ serial_request = """
+<security_group_default_rule>
+ <from_port>22</from_port>
+ <to_port>22</to_port>
+ <ip_protocol>TCP</ip_protocol>
+</security_group_default_rule>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "security_group_default_rule": {
+ "from_port": "22",
+ "to_port": "22",
+ "ip_protocol": "TCP",
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+
+class TestSecurityGroupDefaultRuleXMLSerializer(test.TestCase):
+ def setUp(self):
+ super(TestSecurityGroupDefaultRuleXMLSerializer, self).setUp()
+ self.namespace = wsgi.XMLNS_V11
+ self.rule_serializer =\
+ security_group_default_rules.SecurityGroupDefaultRuleTemplate()
+ self.index_serializer =\
+ security_group_default_rules.SecurityGroupDefaultRulesTemplate()
+
+ def _tag(self, elem):
+ tagname = elem.tag
+ self.assertEqual(tagname[0], '{')
+ tmp = tagname.partition('}')
+ namespace = tmp[0][1:]
+ self.assertEqual(namespace, self.namespace)
+ return tmp[2]
+
+ def _verify_security_group_default_rule(self, raw_rule, tree):
+ self.assertEqual(raw_rule['id'], tree.get('id'))
+
+ seen = set()
+ expected = set(['ip_protocol', 'from_port', 'to_port', 'ip_range',
+ 'ip_range/cidr'])
+
+ for child in tree:
+ child_tag = self._tag(child)
+ seen.add(child_tag)
+ if child_tag == 'ip_range':
+ for gr_child in child:
+ gr_child_tag = self._tag(gr_child)
+ self.assertTrue(gr_child_tag in raw_rule[child_tag])
+ seen.add('%s/%s' % (child_tag, gr_child_tag))
+ self.assertEqual(gr_child.text,
+ raw_rule[child_tag][gr_child_tag])
+ else:
+ self.assertEqual(child.text, raw_rule[child_tag])
+ self.assertEqual(seen, expected)
+
+ def test_rule_serializer(self):
+ raw_rule = dict(id='123',
+ ip_protocol='TCP',
+ from_port='22',
+ to_port='22',
+ ip_range=dict(cidr='10.10.10.0/24'))
+ rule = dict(security_group_default_rule=raw_rule)
+ text = self.rule_serializer.serialize(rule)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('security_group_default_rule', self._tag(tree))
+ self._verify_security_group_default_rule(raw_rule, tree)
+
+ def test_index_serializer(self):
+ rules = [dict(id='123',
+ ip_protocol='TCP',
+ from_port='22',
+ to_port='22',
+ ip_range=dict(cidr='10.10.10.0/24')),
+ dict(id='234',
+ ip_protocol='UDP',
+ from_port='23456',
+ to_port='234567',
+ ip_range=dict(cidr='10.12.0.0/18')),
+ dict(id='345',
+ ip_protocol='tcp',
+ from_port='3456',
+ to_port='4567',
+ ip_range=dict(cidr='192.168.1.0/32'))]
+
+ rules_dict = dict(security_group_default_rules=rules)
+
+ text = self.index_serializer.serialize(rules_dict)
+
+ tree = etree.fromstring(text)
+ self.assertEqual('security_group_default_rules', self._tag(tree))
+ self.assertEqual(len(rules), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_security_group_default_rule(rules[idx], child)
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index 2823c3e8f..5058f17ac 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -17,6 +17,7 @@
from lxml import etree
import mox
+from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_groups
@@ -25,7 +26,6 @@ from nova.api.openstack import xmlutil
from nova import compute
import nova.db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import quota
from nova import test
@@ -118,6 +118,14 @@ class TestSecurityGroups(test.TestCase):
security_groups.ServerSecurityGroupController())
self.manager = security_groups.SecurityGroupActionController()
+ # This needs to be done here to set fake_id because the derived
+ # class needs to be called first if it wants to set
+ # 'security_group_api' and this setUp method needs to be called.
+ if self.controller.security_group_api.id_is_uuid:
+ self.fake_id = '11111111-1111-1111-1111-111111111111'
+ else:
+ self.fake_id = '11111111'
+
def _assert_no_security_groups_reserved(self, context):
"""Check that no reservations are leaked during tests."""
result = quota.QUOTAS.get_project_quotas(context, context.project_id)
@@ -392,9 +400,10 @@ class TestSecurityGroups(test.TestCase):
req, 'invalid')
def test_get_security_group_by_non_existing_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/111111111')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
+ self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, '111111111')
+ req, self.fake_id)
def test_delete_security_group_by_id(self):
sg = security_group_template(id=1, rules=[])
@@ -424,9 +433,10 @@ class TestSecurityGroups(test.TestCase):
req, 'invalid')
def test_delete_security_group_by_non_existing_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/11111111')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % self.fake_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, '11111111')
+ req, self.fake_id)
def test_delete_security_group_in_use(self):
sg = security_group_template(id=1, rules=[])
@@ -639,12 +649,23 @@ class TestSecurityGroupRules(test.TestCase):
def setUp(self):
super(TestSecurityGroupRules, self).setUp()
- sg1 = security_group_template(id=1)
- sg2 = security_group_template(id=2,
- name='authorize_revoke',
- description='authorize-revoke testing')
- db1 = security_group_db(sg1)
- db2 = security_group_db(sg2)
+ self.controller = security_groups.SecurityGroupController()
+ if self.controller.security_group_api.id_is_uuid:
+ id1 = '11111111-1111-1111-1111-111111111111'
+ id2 = '22222222-2222-2222-2222-222222222222'
+ self.invalid_id = '33333333-3333-3333-3333-333333333333'
+ else:
+ id1 = 1
+ id2 = 2
+ self.invalid_id = '33333333'
+
+ self.sg1 = security_group_template(id=id1)
+ self.sg2 = security_group_template(
+ id=id2, name='authorize_revoke',
+ description='authorize-revoke testing')
+
+ db1 = security_group_db(self.sg1)
+ db2 = security_group_db(self.sg2)
def return_security_group(context, group_id):
if group_id == db1['id']:
@@ -661,41 +682,47 @@ class TestSecurityGroupRules(test.TestCase):
self.controller = security_groups.SecurityGroupRulesController()
def test_create_by_cidr(self):
- rule = security_group_rule_template(cidr='10.2.3.124/24')
+ rule = security_group_rule_template(cidr='10.2.3.124/24',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
-
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
- self.assertEquals(security_group_rule['parent_group_id'], 2)
+ self.assertEquals(security_group_rule['parent_group_id'],
+ self.sg2['id'])
self.assertEquals(security_group_rule['ip_range']['cidr'],
"10.2.3.124/24")
def test_create_by_group_id(self):
- rule = security_group_rule_template(group_id=1)
+ rule = security_group_rule_template(group_id=self.sg1['id'],
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
- self.assertEquals(security_group_rule['parent_group_id'], 2)
+ self.assertEquals(security_group_rule['parent_group_id'],
+ self.sg2['id'])
def test_create_by_same_group_id(self):
- rule1 = security_group_rule_template(group_id=1, from_port=80,
- to_port=80)
+ rule1 = security_group_rule_template(group_id=self.sg1['id'],
+ from_port=80, to_port=80,
+ parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
- rule2 = security_group_rule_template(group_id=1, from_port=81,
- to_port=81)
+ rule2 = security_group_rule_template(group_id=self.sg1['id'],
+ from_port=81, to_port=81,
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule2})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
- self.assertEquals(security_group_rule['parent_group_id'], 2)
+ self.assertEquals(security_group_rule['parent_group_id'],
+ self.sg2['id'])
self.assertEquals(security_group_rule['from_port'], 81)
self.assertEquals(security_group_rule['to_port'], 81)
@@ -705,13 +732,13 @@ class TestSecurityGroupRules(test.TestCase):
"ip_protocol": "tcp",
"from_port": "22",
"to_port": "22",
- "parent_group_id": 2,
+ "parent_group_id": self.sg2['id'],
"cidr": "10.2.3.124/2433"}}
rule = security_group_rule_template(
ip_protocol="tcp",
from_port=22,
to_port=22,
- parent_group_id=2,
+ parent_group_id=self.sg2['id'],
cidr="10.2.3.124/2433")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
@@ -722,7 +749,7 @@ class TestSecurityGroupRules(test.TestCase):
ip_protocol="tcp",
from_port=75534,
to_port=22,
- parent_group_id=2,
+ parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
@@ -734,14 +761,15 @@ class TestSecurityGroupRules(test.TestCase):
ip_protocol="icmp",
from_port=1,
to_port=256,
- parent_group_id=2,
+ parent_group_id=self.sg2['id'],
cidr="10.2.3.124/24")
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_cidr(self):
- rule = security_group_rule_template(cidr='10.0.0.0/24')
+ rule = security_group_rule_template(cidr='10.0.0.0/24',
+ parent_group_id=self.sg2['id'])
self.parent_security_group['rules'] = [security_group_rule_db(rule)]
@@ -778,7 +806,7 @@ class TestSecurityGroupRules(test.TestCase):
def test_create_with_non_existing_parent_group_id(self):
rule = security_group_rule_template(group_id='invalid',
- parent_group_id='1111111111111')
+ parent_group_id=self.invalid_id)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
@@ -786,14 +814,16 @@ class TestSecurityGroupRules(test.TestCase):
def test_create_with_invalid_protocol(self):
rule = security_group_rule_template(ip_protocol='invalid-protocol',
- cidr='10.2.2.0/24')
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_protocol(self):
- rule = security_group_rule_template(cidr='10.2.2.0/24')
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
del rule['ip_protocol']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
@@ -802,7 +832,8 @@ class TestSecurityGroupRules(test.TestCase):
def test_create_with_invalid_from_port(self):
rule = security_group_rule_template(from_port='666666',
- cidr='10.2.2.0/24')
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
@@ -810,7 +841,8 @@ class TestSecurityGroupRules(test.TestCase):
def test_create_with_invalid_to_port(self):
rule = security_group_rule_template(to_port='666666',
- cidr='10.2.2.0/24')
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
@@ -818,7 +850,8 @@ class TestSecurityGroupRules(test.TestCase):
def test_create_with_non_numerical_from_port(self):
rule = security_group_rule_template(from_port='invalid',
- cidr='10.2.2.0/24')
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
@@ -826,14 +859,16 @@ class TestSecurityGroupRules(test.TestCase):
def test_create_with_non_numerical_to_port(self):
rule = security_group_rule_template(to_port='invalid',
- cidr='10.2.2.0/24')
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_from_port(self):
- rule = security_group_rule_template(cidr='10.2.2.0/24')
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
del rule['from_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
@@ -841,7 +876,8 @@ class TestSecurityGroupRules(test.TestCase):
req, {'security_group_rule': rule})
def test_create_with_no_to_port(self):
- rule = security_group_rule_template(cidr='10.2.2.0/24')
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
del rule['to_port']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
@@ -849,14 +885,15 @@ class TestSecurityGroupRules(test.TestCase):
req, {'security_group_rule': rule})
def test_create_with_invalid_cidr(self):
- rule = security_group_rule_template(cidr='10.2.2222.0/24')
+ rule = security_group_rule_template(cidr='10.2.2222.0/24',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_no_cidr_group(self):
- rule = security_group_rule_template()
+ rule = security_group_rule_template(parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
@@ -869,54 +906,59 @@ class TestSecurityGroupRules(test.TestCase):
"0.0.0.0/0")
def test_create_with_invalid_group_id(self):
- rule = security_group_rule_template(group_id='invalid')
+ rule = security_group_rule_template(group_id='invalid',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_empty_group_id(self):
- rule = security_group_rule_template(group_id='')
+ rule = security_group_rule_template(group_id='',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_nonexist_group_id(self):
- rule = security_group_rule_template(group_id='222222')
+ rule = security_group_rule_template(group_id=self.invalid_id,
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_with_same_group_parent_id_and_group_id(self):
- rule = security_group_rule_template(group_id=1, parent_group_id=1)
-
+ rule = security_group_rule_template(group_id=self.sg1['id'],
+ parent_group_id=self.sg1['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
self.assertNotEquals(security_group_rule['id'], 0)
- self.assertEquals(security_group_rule['parent_group_id'], 1)
- self.assertEquals(security_group_rule['id'], 1)
+ self.assertEquals(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+ self.assertEquals(security_group_rule['group']['name'],
+ self.sg1['name'])
def _test_create_with_no_ports_and_no_group(self, proto):
- rule = {'ip_protocol': proto, 'parent_group_id': '2'}
+ rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def _test_create_with_no_ports(self, proto):
- rule = {'ip_protocol': proto, 'parent_group_id': '2', 'group_id': '1'}
+ rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
+ 'group_id': self.sg1['id']}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
-
security_group_rule = res_dict['security_group_rule']
expected_rule = {
'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
- 'ip_protocol': proto, 'to_port': 65535, 'parent_group_id': 2,
- 'ip_range': {}, 'id': 1
+ 'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
+ self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
if proto == 'icmp':
expected_rule['to_port'] = -1
@@ -935,10 +977,10 @@ class TestSecurityGroupRules(test.TestCase):
self._test_create_with_no_ports_and_no_group('udp')
self._test_create_with_no_ports('udp')
- def _test_create_with_ports(self, id_val, proto, from_port, to_port):
+ def _test_create_with_ports(self, proto, from_port, to_port):
rule = {
'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
- 'parent_group_id': '2', 'group_id': '1'
+ 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
@@ -947,32 +989,32 @@ class TestSecurityGroupRules(test.TestCase):
expected_rule = {
'from_port': from_port,
'group': {'tenant_id': '123', 'name': 'test'},
- 'ip_protocol': proto, 'to_port': to_port, 'parent_group_id': 2,
- 'ip_range': {}, 'id': id_val
+ 'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
+ self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
}
self.assertTrue(security_group_rule['ip_protocol'] == proto)
- self.assertTrue(security_group_rule['id'] == id_val)
self.assertTrue(security_group_rule['from_port'] == from_port)
self.assertTrue(security_group_rule['to_port'] == to_port)
self.assertTrue(security_group_rule == expected_rule)
def test_create_with_ports_icmp(self):
- self._test_create_with_ports(1, 'icmp', 0, 1)
- self._test_create_with_ports(2, 'icmp', 0, 0)
- self._test_create_with_ports(3, 'icmp', 1, 0)
+ self._test_create_with_ports('icmp', 0, 1)
+ self._test_create_with_ports('icmp', 0, 0)
+ self._test_create_with_ports('icmp', 1, 0)
def test_create_with_ports_tcp(self):
- self._test_create_with_ports(1, 'tcp', 1, 1)
- self._test_create_with_ports(2, 'tcp', 1, 65535)
- self._test_create_with_ports(3, 'tcp', 65535, 65535)
+ self._test_create_with_ports('tcp', 1, 1)
+ self._test_create_with_ports('tcp', 1, 65535)
+ self._test_create_with_ports('tcp', 65535, 65535)
def test_create_with_ports_udp(self):
- self._test_create_with_ports(1, 'udp', 1, 1)
- self._test_create_with_ports(2, 'udp', 1, 65535)
- self._test_create_with_ports(3, 'udp', 65535, 65535)
+ self._test_create_with_ports('udp', 1, 1)
+ self._test_create_with_ports('udp', 1, 65535)
+ self._test_create_with_ports('udp', 65535, 65535)
def test_delete(self):
- rule = security_group_rule_template(id=10)
+ rule = security_group_rule_template(id=self.sg2['id'],
+ parent_group_id=self.sg2['id'])
def security_group_rule_get(context, id):
return security_group_rule_db(rule)
@@ -985,8 +1027,9 @@ class TestSecurityGroupRules(test.TestCase):
self.stubs.Set(nova.db, 'security_group_rule_destroy',
security_group_rule_destroy)
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/10')
- self.controller.delete(req, '10')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+ % self.sg2['id'])
+ self.controller.delete(req, self.sg2['id'])
def test_delete_invalid_rule_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
@@ -995,30 +1038,32 @@ class TestSecurityGroupRules(test.TestCase):
req, 'invalid')
def test_delete_non_existing_rule_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
- '/22222222222222')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+ % self.invalid_id)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, '22222222222222')
+ req, self.invalid_id)
def test_create_rule_quota_limit(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
for num in range(100, 100 + CONF.quota_security_group_rules):
rule = {
'ip_protocol': 'tcp', 'from_port': num,
- 'to_port': num, 'parent_group_id': '2', 'group_id': '1'
+ 'to_port': num, 'parent_group_id': self.sg2['id'],
+ 'group_id': self.sg1['id']
}
self.controller.create(req, {'security_group_rule': rule})
rule = {
'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
- 'parent_group_id': '2', 'group_id': '1'
+ 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
self.assertRaises(exception.SecurityGroupLimitExceeded,
self.controller.create,
req, {'security_group_rule': rule})
def test_create_rule_cidr_allow_all(self):
- rule = security_group_rule_template(cidr='0.0.0.0/0')
+ rule = security_group_rule_template(cidr='0.0.0.0/0',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
@@ -1031,7 +1076,8 @@ class TestSecurityGroupRules(test.TestCase):
"0.0.0.0/0")
def test_create_rule_cidr_allow_some(self):
- rule = security_group_rule_template(cidr='15.0.0.0/8')
+ rule = security_group_rule_template(cidr='15.0.0.0/8',
+ parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
@@ -1340,6 +1386,7 @@ class SecurityGroupsOutputTest(test.TestCase):
def setUp(self):
super(SecurityGroupsOutputTest, self).setUp()
+ self.controller = security_groups.SecurityGroupController()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_password.py b/nova/tests/api/openstack/compute/contrib/test_server_password.py
index 87da90efe..6e7d66a06 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_password.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_password.py
@@ -14,11 +14,11 @@
# under the License.
from lxml import etree
+from oslo.config import cfg
import webob
from nova.api.metadata import password
from nova import compute
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
index 1a8a570e8..90e226eac 100644
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -16,19 +16,19 @@
import datetime
from lxml import etree
+from oslo.config import cfg
import webob
+from webob import exc
from nova.api.openstack.compute.contrib import volumes
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.volume import cinder
-from webob import exc
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
@@ -86,7 +86,7 @@ def fake_get_instance_bdms(self, context, instance):
'volume_id': FAKE_UUID_A,
'volume_size': 1},
{'id': 2,
- 'instance_uuid':instance['uuid'],
+ 'instance_uuid': instance['uuid'],
'device_name': '/dev/fake1',
'delete_on_termination': 'False',
'virtual_name': 'MyNamesVirtual',
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index 9c45edc08..68e5e1b99 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -18,6 +18,7 @@
import iso8601
from lxml import etree
+from oslo.config import cfg
import webob
from nova.api.openstack import compute
@@ -25,7 +26,6 @@ from nova.api.openstack.compute import extensions as compute_extensions
from nova.api.openstack import extensions as base_extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
@@ -185,6 +185,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"FloatingIpsBulk",
"Fox In Socks",
"Hosts",
+ "ImageSize",
"InstanceActions",
"Keypairs",
"Multinic",
@@ -193,6 +194,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"Quotas",
"Rescue",
"SchedulerHints",
+ "SecurityGroupDefaultRules",
"SecurityGroups",
"ServerDiagnostics",
"ServerPassword",
diff --git a/nova/tests/api/openstack/compute/test_image_metadata.py b/nova/tests/api/openstack/compute/test_image_metadata.py
index 9a8b75c9e..29165b548 100644
--- a/nova/tests/api/openstack/compute/test_image_metadata.py
+++ b/nova/tests/api/openstack/compute/test_image_metadata.py
@@ -15,10 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
import webob
from nova.api.openstack.compute import image_metadata
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index d4c93ef39..101458bf3 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -17,6 +17,7 @@ import base64
import uuid
import mox
+from oslo.config import cfg
import webob
from nova.api.openstack.compute import servers
@@ -26,14 +27,12 @@ from nova.compute import vm_states
from nova import db
from nova import exception
from nova.image import glance
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.image import fake
from nova.tests import matchers
-
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
index 71fa9f3f3..3ba895953 100644
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/test_server_metadata.py
@@ -17,6 +17,7 @@
import uuid
+from oslo.config import cfg
import webob
from nova.api.openstack.compute import server_metadata
@@ -24,7 +25,6 @@ from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
import nova.db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 4bfb1c1e3..228d81f45 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -23,6 +23,7 @@ import uuid
import iso8601
from lxml import etree
+from oslo.config import cfg
import webob
from nova.api.openstack import compute
@@ -41,7 +42,6 @@ from nova.db.sqlalchemy import models
from nova import exception
from nova.network import manager
from nova.network.quantumv2 import api as quantum_api
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
from nova.openstack.common import rpc
@@ -52,7 +52,6 @@ from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests import matchers
-
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
CONF.import_opt('scheduler_topic', 'nova.scheduler.rpcapi')
@@ -165,8 +164,6 @@ class ServersControllerTest(test.TestCase):
return_servers)
self.stubs.Set(db, 'instance_get_by_uuid',
return_server)
- self.stubs.Set(db, 'instance_get_all_by_project',
- return_servers)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'instance_update_and_get_original',
@@ -1740,6 +1737,11 @@ class ServersControllerCreateTest(test.TestCase):
"""
return self.instance_cache_by_id[instance_id]
+ def instance_update(context, uuid, values):
+ instance = self.instance_cache_by_uuid[uuid]
+ instance.update(values)
+ return instance
+
def rpc_call_wrapper(context, topic, msg, timeout=None):
"""Stub out the scheduler creating the instance entry."""
if (topic == CONF.scheduler_topic and
@@ -1779,6 +1781,7 @@ class ServersControllerCreateTest(test.TestCase):
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
+ self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(rpc, 'cast', fake_method)
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
self.stubs.Set(db, 'instance_update_and_get_original',
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 3ef98b902..7da10e73e 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -227,7 +227,7 @@ def _make_image_fixtures():
# Public image
add_fixture(id=image_id, name='public image', is_public=True,
status='active', properties={'key1': 'value1'},
- min_ram="128", min_disk="10")
+ min_ram="128", min_disk="10", size='25165824')
image_id += 1
# Snapshot for User 1
@@ -238,7 +238,7 @@ def _make_image_fixtures():
'deleted', 'pending_delete'):
add_fixture(id=image_id, name='%s snapshot' % status,
is_public=False, status=status,
- properties=snapshot_properties)
+ properties=snapshot_properties, size='25165824')
image_id += 1
# Image without a name
@@ -427,7 +427,7 @@ def stub_instance(id, user_id=None, project_id=None, host=None,
project_id = 'fake_project'
if metadata:
- metadata = [{'key':k, 'value':v} for k, v in metadata.items()]
+ metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
elif include_fake_metadata:
metadata = [models.InstanceMetadata(key='seq', value=str(id))]
else:
diff --git a/nova/tests/api/test_sizelimit.py b/nova/tests/api/test_sizelimit.py
index 9c5ab1a27..862a0d65f 100644
--- a/nova/tests/api/test_sizelimit.py
+++ b/nova/tests/api/test_sizelimit.py
@@ -12,10 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
import webob
import nova.api.sizelimit
-from nova.openstack.common import cfg
from nova import test
CONF = cfg.CONF
diff --git a/nova/tests/baremetal/db/base.py b/nova/tests/baremetal/db/base.py
index 37e51fe79..499eee32a 100644
--- a/nova/tests/baremetal/db/base.py
+++ b/nova/tests/baremetal/db/base.py
@@ -15,8 +15,9 @@
"""Bare-metal DB test base class."""
+from oslo.config import cfg
+
from nova import context as nova_context
-from nova.openstack.common import cfg
from nova import test
from nova.virt.baremetal.db import migration as bm_migration
from nova.virt.baremetal.db.sqlalchemy import session as bm_session
diff --git a/nova/tests/baremetal/db/test_bm_node.py b/nova/tests/baremetal/db/test_bm_node.py
index 8cac38378..204a6bf7b 100644
--- a/nova/tests/baremetal/db/test_bm_node.py
+++ b/nova/tests/baremetal/db/test_bm_node.py
@@ -46,10 +46,6 @@ class BareMetalNodesTestCase(base.BMDBTestCase):
ref = db.bm_node_create(self.context, n)
self.ids.append(ref['id'])
- def test_get_all0(self):
- r = db.bm_node_get_all(self.context)
- self.assertEquals(r, [])
-
def test_get_all(self):
r = db.bm_node_get_all(self.context)
self.assertEquals(r, [])
@@ -69,7 +65,7 @@ class BareMetalNodesTestCase(base.BMDBTestCase):
self.assertEquals(r['pm_address'], '1')
self.assertRaises(
- exception.InstanceNotFound,
+ exception.NodeNotFound,
db.bm_node_get,
self.context, -1)
@@ -95,13 +91,63 @@ class BareMetalNodesTestCase(base.BMDBTestCase):
r = db.bm_node_get_all(self.context, service_host="host3")
self.assertEquals(r, [])
+ def test_get_associated(self):
+ self._create_nodes()
+
+ r = db.bm_node_get_associated(self.context, service_host=None)
+ self.assertEquals(len(r), 1)
+ self.assertEquals(r[0]['pm_address'], '1')
+
+ r = db.bm_node_get_unassociated(self.context, service_host=None)
+ self.assertEquals(len(r), 5)
+ pmaddrs = [x['pm_address'] for x in r]
+ self.assertIn('0', pmaddrs)
+ self.assertIn('2', pmaddrs)
+ self.assertIn('3', pmaddrs)
+ self.assertIn('4', pmaddrs)
+ self.assertIn('5', pmaddrs)
+
def test_destroy(self):
self._create_nodes()
db.bm_node_destroy(self.context, self.ids[0])
self.assertRaises(
- exception.InstanceNotFound,
+ exception.NodeNotFound,
+ db.bm_node_get,
+ self.context, self.ids[0])
+
+ r = db.bm_node_get_all(self.context)
+ self.assertEquals(len(r), 5)
+
+ def test_destroy_with_interfaces(self):
+ self._create_nodes()
+
+ if_a_id = db.bm_interface_create(self.context, self.ids[0],
+ 'aa:aa:aa:aa:aa:aa', None, None)
+ if_b_id = db.bm_interface_create(self.context, self.ids[0],
+ 'bb:bb:bb:bb:bb:bb', None, None)
+ if_x_id = db.bm_interface_create(self.context, self.ids[1],
+ '11:22:33:44:55:66', None, None)
+
+ db.bm_node_destroy(self.context, self.ids[0])
+
+ self.assertRaises(
+ exception.NovaException,
+ db.bm_interface_get,
+ self.context, if_a_id)
+
+ self.assertRaises(
+ exception.NovaException,
+ db.bm_interface_get,
+ self.context, if_b_id)
+
+ # Another node's interface is not affected
+ if_x = db.bm_interface_get(self.context, if_x_id)
+ self.assertEqual(self.ids[1], if_x['bm_node_id'])
+
+ self.assertRaises(
+ exception.NodeNotFound,
db.bm_node_get,
self.context, self.ids[0])
diff --git a/nova/tests/baremetal/db/utils.py b/nova/tests/baremetal/db/utils.py
index 800305402..e3a7b8489 100644
--- a/nova/tests/baremetal/db/utils.py
+++ b/nova/tests/baremetal/db/utils.py
@@ -22,6 +22,7 @@ from nova.virt.baremetal.db.sqlalchemy import models as bm_models
def new_bm_node(**kwargs):
h = bm_models.BareMetalNode()
h.id = kwargs.pop('id', None)
+ h.uuid = kwargs.pop('uuid', None)
h.service_host = kwargs.pop('service_host', None)
h.instance_uuid = kwargs.pop('instance_uuid', None)
h.cpus = kwargs.pop('cpus', 1)
diff --git a/nova/tests/baremetal/test_driver.py b/nova/tests/baremetal/test_driver.py
index 37ef71881..8e23908f4 100644
--- a/nova/tests/baremetal/test_driver.py
+++ b/nova/tests/baremetal/test_driver.py
@@ -20,8 +20,9 @@
"""Tests for the base baremetal driver class."""
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova import test
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
@@ -79,45 +80,58 @@ class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
fake_image.stub_out_image_service(self.stubs)
self.context = utils.get_test_admin_context()
self.driver = bm_driver.BareMetalDriver(None)
- self.node_info = bm_db_utils.new_bm_node(
- id=123,
- service_host='test_host',
- cpus=2,
- memory_mb=2048,
- )
- self.nic_info = [
- {'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
- 'port_no': 1},
- {'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
- 'port_no': 2},
- ]
self.addCleanup(fake_image.FakeImageService_reset)
- def _create_node(self):
- self.node = db.bm_node_create(self.context, self.node_info)
- for nic in self.nic_info:
+ def _create_node(self, node_info=None, nic_info=None):
+ result = {}
+ if node_info is None:
+ node_info = bm_db_utils.new_bm_node(
+ id=123,
+ service_host='test_host',
+ cpus=2,
+ memory_mb=2048,
+ )
+ if nic_info is None:
+ nic_info = [
+ {'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
+ 'port_no': 1},
+ {'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
+ 'port_no': 2},
+ ]
+ result['node_info'] = node_info
+ result['nic_info'] = nic_info
+ result['node'] = db.bm_node_create(self.context, node_info)
+
+ for nic in nic_info:
db.bm_interface_create(
self.context,
- self.node['id'],
+ result['node']['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
- self.test_instance = utils.get_test_instance()
- self.test_instance['node'] = self.node['id']
- self.spawn_params = dict(
+ result['instance'] = utils.get_test_instance()
+ result['instance']['node'] = result['node']['uuid']
+ result['spawn_params'] = dict(
admin_password='test_pass',
block_device_info=None,
context=self.context,
- image_meta=utils.get_test_image_info(None,
- self.test_instance),
+ image_meta=utils.get_test_image_info(
+ None, result['instance']),
injected_files=[('/fake/path', 'hello world')],
- instance=self.test_instance,
+ instance=result['instance'],
network_info=utils.get_test_network_info(),
)
+ result['destroy_params'] = dict(
+ instance=result['instance'],
+ network_info=result['spawn_params']['network_info'],
+ block_device_info=result['spawn_params']['block_device_info'],
+ )
+
+ return result
def test_get_host_stats(self):
- self._create_node()
+ node = self._create_node()
stats = self.driver.get_host_stats()
self.assertTrue(isinstance(stats, list))
self.assertEqual(len(stats), 1)
@@ -125,61 +139,219 @@ class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
self.assertEqual(stats['cpu_arch'], 'test')
self.assertEqual(stats['test_spec'], 'test_value')
self.assertEqual(stats['hypervisor_type'], 'baremetal')
- self.assertEqual(stats['hypervisor_hostname'], '123')
+ self.assertEqual(stats['hypervisor_hostname'], node['node']['uuid'])
self.assertEqual(stats['host'], 'test_host')
self.assertEqual(stats['vcpus'], 2)
self.assertEqual(stats['host_memory_total'], 2048)
def test_spawn_ok(self):
- self._create_node()
- self.driver.spawn(**self.spawn_params)
- row = db.bm_node_get(self.context, self.node['id'])
+ node = self._create_node()
+ self.driver.spawn(**node['spawn_params'])
+ row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
+ self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
+ self.assertEqual(row['instance_name'], node['instance']['hostname'])
def test_macs_for_instance(self):
- self._create_node()
+ node = self._create_node()
+ expected = set([nic['address'] for nic in node['nic_info']])
+ self.assertEqual(
+ expected, self.driver.macs_for_instance(node['instance']))
+
+ def test_macs_for_instance_after_spawn(self):
+ node = self._create_node()
+ self.driver.spawn(**node['spawn_params'])
+
+ expected = set([nic['address'] for nic in node['nic_info']])
+ self.assertEqual(
+ expected, self.driver.macs_for_instance(node['instance']))
+
+ def test_macs_for_instance(self):
+ node = self._create_node()
expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
self.assertEqual(
- expected, self.driver.macs_for_instance(self.test_instance))
+ expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_no_interfaces(self):
# Nodes cannot boot with no MACs, so we raise an error if that happens.
- self.nic_info = []
- self._create_node()
+ node = self._create_node(nic_info=[])
self.assertRaises(exception.NovaException,
- self.driver.macs_for_instance, self.test_instance)
+ self.driver.macs_for_instance, node['instance'])
- def test_spawn_node_in_use(self):
- self._create_node()
- db.bm_node_update(self.context, self.node['id'],
+ def test_spawn_node_already_associated(self):
+ node = self._create_node()
+ db.bm_node_update(self.context, node['node']['id'],
{'instance_uuid': '1234-5678'})
self.assertRaises(exception.NovaException,
- self.driver.spawn, **self.spawn_params)
+ self.driver.spawn, **node['spawn_params'])
- row = db.bm_node_get(self.context, self.node['id'])
+ row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], None)
+ def test_spawn_node_in_use(self):
+ node = self._create_node()
+
+ self.driver.spawn(**node['spawn_params'])
+ self.assertRaises(exception.NovaException,
+ self.driver.spawn, **node['spawn_params'])
+
def test_spawn_node_not_found(self):
- self._create_node()
- db.bm_node_update(self.context, self.node['id'],
- {'id': 9876})
+ node = self._create_node()
+ db.bm_node_update(self.context, node['node']['id'],
+ {'uuid': 'hide-this-node'})
self.assertRaises(exception.NovaException,
- self.driver.spawn, **self.spawn_params)
+ self.driver.spawn, **node['spawn_params'])
- row = db.bm_node_get(self.context, 9876)
+ row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], None)
def test_spawn_fails(self):
- self._create_node()
+ node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
- self.driver.spawn, **self.spawn_params)
+ self.driver.spawn, **node['spawn_params'])
+
+ row = db.bm_node_get(self.context, node['node']['id'])
+ self.assertEqual(row['task_state'], baremetal_states.DELETED)
+
+ def test_spawn_fails_to_cleanup(self):
+ node = self._create_node()
+
+ self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
+ self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
+ fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
+ fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.driver.spawn, **node['spawn_params'])
+
+ row = db.bm_node_get(self.context, node['node']['id'])
+ self.assertEqual(row['task_state'], baremetal_states.ERROR)
+
+ def test_destroy_ok(self):
+ node = self._create_node()
+ self.driver.spawn(**node['spawn_params'])
+ self.driver.destroy(**node['destroy_params'])
+
+ row = db.bm_node_get(self.context, node['node']['id'])
+ self.assertEqual(row['task_state'], baremetal_states.DELETED)
+ self.assertEqual(row['instance_uuid'], None)
+ self.assertEqual(row['instance_name'], None)
+
+ def test_destroy_fails(self):
+ node = self._create_node()
+
+ self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
+ fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
+ self.mox.ReplayAll()
+
+ self.driver.spawn(**node['spawn_params'])
+ self.assertRaises(test.TestingException,
+ self.driver.destroy, **node['destroy_params'])
- row = db.bm_node_get(self.context, self.node['id'])
+ row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ERROR)
+ self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
+
+ def test_get_available_resources(self):
+ node = self._create_node()
+
+ resources = self.driver.get_available_resource(node['node']['uuid'])
+ self.assertEqual(resources['memory_mb'],
+ node['node_info']['memory_mb'])
+ self.assertEqual(resources['memory_mb_used'], 0)
+
+ self.driver.spawn(**node['spawn_params'])
+ resources = self.driver.get_available_resource(node['node']['uuid'])
+ self.assertEqual(resources['memory_mb_used'],
+ node['node_info']['memory_mb'])
+
+ self.driver.destroy(**node['destroy_params'])
+ resources = self.driver.get_available_resource(node['node']['uuid'])
+ self.assertEqual(resources['memory_mb_used'], 0)
+
+ def test_get_available_nodes(self):
+ self.assertEqual(0, len(self.driver.get_available_nodes()))
+
+ node1 = self._create_node()
+ self.assertEqual(1, len(self.driver.get_available_nodes()))
+
+ node_info = bm_db_utils.new_bm_node(
+ id=456,
+ service_host='test_host',
+ cpus=2,
+ memory_mb=2048,
+ )
+ nic_info = [
+ {'address': 'cc:cc:cc', 'datapath_id': '0x1',
+ 'port_no': 1},
+ {'address': 'dd:dd:dd', 'datapath_id': '0x2',
+ 'port_no': 2},
+ ]
+ node2 = self._create_node(node_info=node_info, nic_info=nic_info)
+ self.assertEqual(2, len(self.driver.get_available_nodes()))
+ self.assertEqual([node1['node']['uuid'], node2['node']['uuid']],
+ self.driver.get_available_nodes())
+
+ node1['instance']['hostname'] = 'test-host-1'
+ node2['instance']['hostname'] = 'test-host-2'
+
+ self.driver.spawn(**node1['spawn_params'])
+ self.assertEqual(1, len(self.driver.get_available_nodes()))
+
+ self.driver.spawn(**node2['spawn_params'])
+ self.assertEqual(0, len(self.driver.get_available_nodes()))
+
+ self.driver.destroy(**node1['destroy_params'])
+ self.assertEqual(1, len(self.driver.get_available_nodes()))
+
+ self.driver.destroy(**node2['destroy_params'])
+ self.assertEqual(2, len(self.driver.get_available_nodes()))
+ self.assertEqual([node1['node']['uuid'], node2['node']['uuid']],
+ self.driver.get_available_nodes())
+
+ def test_list_instances(self):
+ self.assertEqual([], self.driver.list_instances())
+
+ node1 = self._create_node()
+ self.assertEqual([], self.driver.list_instances())
+
+ node_info = bm_db_utils.new_bm_node(
+ id=456,
+ service_host='test_host',
+ cpus=2,
+ memory_mb=2048,
+ )
+ nic_info = [
+ {'address': 'cc:cc:cc', 'datapath_id': '0x1',
+ 'port_no': 1},
+ {'address': 'dd:dd:dd', 'datapath_id': '0x2',
+ 'port_no': 2},
+ ]
+ node2 = self._create_node(node_info=node_info, nic_info=nic_info)
+ self.assertEqual([], self.driver.list_instances())
+
+ node1['instance']['hostname'] = 'test-host-1'
+ node2['instance']['hostname'] = 'test-host-2'
+
+ self.driver.spawn(**node1['spawn_params'])
+ self.assertEqual(['test-host-1'],
+ self.driver.list_instances())
+
+ self.driver.spawn(**node2['spawn_params'])
+ self.assertEqual(['test-host-1', 'test-host-2'],
+ self.driver.list_instances())
+
+ self.driver.destroy(**node1['destroy_params'])
+ self.assertEqual(['test-host-2'],
+ self.driver.list_instances())
+
+ self.driver.destroy(**node2['destroy_params'])
+ self.assertEqual([], self.driver.list_instances())
diff --git a/nova/tests/baremetal/test_ipmi.py b/nova/tests/baremetal/test_ipmi.py
index 87800cb47..faf800a46 100644
--- a/nova/tests/baremetal/test_ipmi.py
+++ b/nova/tests/baremetal/test_ipmi.py
@@ -23,7 +23,8 @@ import os
import stat
import tempfile
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova import test
from nova.tests.baremetal.db import utils as bm_db_utils
from nova import utils
diff --git a/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py b/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py
index 56c3f953e..b78aa5e8b 100644
--- a/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py
+++ b/nova/tests/baremetal/test_nova_baremetal_deploy_helper.py
@@ -22,10 +22,11 @@ import sys
import tempfile
import time
-from nova import test
+import mox
+from nova import test
from nova.tests.baremetal.db import base as bm_db_base
-
+from nova.virt.baremetal import db as bm_db
TOPDIR = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
@@ -93,11 +94,19 @@ class WorkerTestCase(bm_db_base.BMDBTestCase):
history.append(params)
self.stubs.Set(bmdh, 'deploy', fake_deploy)
+ self.mox.StubOutWithMock(bm_db, 'bm_node_update')
+        # bm_node_update is called twice per deploy inside Worker.run,
+        # and three deploys are queued below, so expect six calls.
+        for i in range(6):
+ bm_db.bm_node_update(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+
params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}]
for (dep_id, params) in enumerate(params_list):
bmdh.QUEUE.put((dep_id, params))
self.wait_queue_empty(1)
self.assertEqual(params_list, history)
+ self.mox.VerifyAll()
def test_run_with_failing_deploy(self):
"""Check a worker keeps on running even if deploy() raises
@@ -111,11 +120,19 @@ class WorkerTestCase(bm_db_base.BMDBTestCase):
raise Exception('test')
self.stubs.Set(bmdh, 'deploy', fake_deploy)
+ self.mox.StubOutWithMock(bm_db, 'bm_node_update')
+        # bm_node_update is called twice per deploy inside Worker.run,
+        # and three deploys are queued below, so expect six calls.
+        for i in range(6):
+ bm_db.bm_node_update(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+
params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}]
for (dep_id, params) in enumerate(params_list):
bmdh.QUEUE.put((dep_id, params))
self.wait_queue_empty(1)
self.assertEqual(params_list, history)
+ self.mox.VerifyAll()
class PhysicalWorkTestCase(test.TestCase):
@@ -175,6 +192,8 @@ class PhysicalWorkTestCase(test.TestCase):
bmdh.deploy(address, port, iqn, lun, image_path, pxe_config_path,
root_mb, swap_mb)
+ self.mox.VerifyAll()
+
def test_always_logout_iscsi(self):
"""logout_iscsi() must be called once login_iscsi() is called."""
address = '127.0.0.1'
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
index 9703feb40..d9e41bc67 100644
--- a/nova/tests/baremetal/test_pxe.py
+++ b/nova/tests/baremetal/test_pxe.py
@@ -21,15 +21,18 @@
import os
+import mox
+
+from oslo.config import cfg
from testtools import matchers
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils
+from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import pxe
from nova.virt.baremetal import utils as bm_utils
@@ -67,7 +70,6 @@ class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info(),
self.node_info = bm_db_utils.new_bm_node(
- id=123,
service_host='test_host',
cpus=4,
memory_mb=2048,
@@ -421,7 +423,7 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
- def test_activate_bootloader(self):
+ def test_activate_bootloader_passes_details(self):
self._create_node()
macs = [nic['address'] for nic in self.nic_info]
macs.append(self.node_info['prov_mac_address'])
@@ -441,7 +443,6 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
self.mox.StubOutWithMock(bm_utils, 'random_alnum')
- self.mox.StubOutWithMock(db, 'bm_deployment_create')
self.mox.StubOutWithMock(pxe, 'build_pxe_config')
self.mox.StubOutWithMock(bm_utils, 'write_to_file')
self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
@@ -449,68 +450,73 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
pxe.get_tftp_image_info(self.instance).AndReturn(image_info)
pxe.get_partition_sizes(self.instance).AndReturn((0, 0))
bm_utils.random_alnum(32).AndReturn('alnum')
- db.bm_deployment_create(
- self.context, 'alnum', image_path, pxe_path, 0, 0).\
- AndReturn(1234)
pxe.build_pxe_config(
- 1234, 'alnum', iqn, 'aaaa', 'bbbb', 'cccc', 'dddd').\
- AndReturn(pxe_config)
+ self.node['id'], 'alnum', iqn,
+ 'aaaa', 'bbbb', 'cccc', 'dddd').AndReturn(pxe_config)
bm_utils.write_to_file(pxe_path, pxe_config)
for mac in macs:
bm_utils.create_link_without_raise(
pxe_path, pxe.get_pxe_mac_path(mac))
+
self.mox.ReplayAll()
- self.driver.activate_bootloader(
- self.context, self.node, self.instance)
+ self.driver.activate_bootloader(self.context, self.node, self.instance)
+
self.mox.VerifyAll()
- def test_deactivate_bootloader(self):
+ def test_activate_and_deactivate_bootloader(self):
self._create_node()
- macs = [nic['address'] for nic in self.nic_info]
- macs.append(self.node_info['prov_mac_address'])
- macs.sort()
- image_info = {
- 'deploy_kernel': [None, 'aaaa'],
- 'deploy_ramdisk': [None, 'bbbb'],
- 'kernel': [None, 'cccc'],
- 'ramdisk': [None, 'dddd'],
+ extra_specs = {
+ 'deploy_kernel_id': 'eeee',
+ 'deploy_ramdisk_id': 'ffff',
}
+ self.instance['extra_specs'] = extra_specs
self.instance['uuid'] = 'fake-uuid'
- pxe_path = pxe.get_pxe_config_file_path(self.instance)
+ self.mox.StubOutWithMock(bm_utils, 'write_to_file')
+ self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
- self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
- self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
- pxe.get_tftp_image_info(self.instance).AndReturn(image_info)
- for uuid, path in [image_info[label] for label in image_info]:
- bm_utils.unlink_without_raise(path)
- bm_utils.unlink_without_raise(pxe_path)
- self.driver._collect_mac_addresses(self.context, self.node).\
- AndReturn(macs)
- for mac in macs:
- bm_utils.unlink_without_raise(pxe.get_pxe_mac_path(mac))
- bm_utils.rmtree_without_raise(
- os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
+ # create the config file
+ bm_utils.write_to_file(mox.StrContains('fake-uuid'),
+ mox.StrContains(CONF.baremetal.tftp_root))
+ # unlink and link the 3 interfaces
+ for i in range(3):
+ bm_utils.unlink_without_raise(mox.Or(
+ mox.StrContains('fake-uuid'),
+ mox.StrContains(CONF.baremetal.tftp_root)))
+ bm_utils.create_link_without_raise(
+ mox.StrContains('fake-uuid'),
+ mox.StrContains(CONF.baremetal.tftp_root))
+ # unlink all 3 interfaces, 4 images, and the config file
+ for i in range(8):
+ bm_utils.unlink_without_raise(mox.Or(
+ mox.StrContains('fake-uuid'),
+ mox.StrContains(CONF.baremetal.tftp_root)))
+ bm_utils.rmtree_without_raise(mox.StrContains('fake-uuid'))
+
self.mox.ReplayAll()
- self.driver.deactivate_bootloader(
- self.context, self.node, self.instance)
+ # activate and deactivate the bootloader
+ # and check the deployment task_state in the database
+ row = db.bm_node_get(self.context, 1)
+ self.assertTrue(row['deploy_key'] is None)
+
+ self.driver.activate_bootloader(self.context, self.node,
+ self.instance)
+ row = db.bm_node_get(self.context, 1)
+ self.assertTrue(row['deploy_key'] is not None)
+
+ self.driver.deactivate_bootloader(self.context, self.node,
+ self.instance)
+ row = db.bm_node_get(self.context, 1)
+ self.assertTrue(row['deploy_key'] is None)
+
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
- macs = [nic['address'] for nic in self.nic_info]
- macs.append(self.node_info['prov_mac_address'])
- macs.sort()
- image_info = {
- 'deploy_kernel': [None, 'aaaa'],
- 'deploy_ramdisk': [None, 'bbbb'],
- 'kernel': [None, 'cccc'],
- 'ramdisk': [None, 'dddd'],
- }
self.instance['uuid'] = 'fake-uuid'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
@@ -531,3 +537,33 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
+
+ def test_activate_node(self):
+ self._create_node()
+ self.instance['uuid'] = 'fake-uuid'
+ self.flags(pxe_deploy_timeout=1, group='baremetal')
+
+ db.bm_node_update(self.context, 1,
+ {'task_state': baremetal_states.DEPLOYING,
+ 'instance_uuid': 'fake-uuid'})
+
+ # test timeout
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver.activate_node,
+ self.context, self.node, self.instance)
+
+ # test DEPLOYDONE
+ db.bm_node_update(self.context, 1,
+ {'task_state': baremetal_states.DEPLOYDONE})
+ self.driver.activate_node(self.context, self.node, self.instance)
+
+ # test no deploy -- state is just ACTIVE
+ db.bm_node_update(self.context, 1,
+ {'task_state': baremetal_states.ACTIVE})
+ self.driver.activate_node(self.context, self.node, self.instance)
+
+ # test node gone
+ db.bm_node_destroy(self.context, 1)
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver.activate_node,
+ self.context, self.node, self.instance)
diff --git a/nova/tests/baremetal/test_virtual_power_driver.py b/nova/tests/baremetal/test_virtual_power_driver.py
index 689677698..5a38ed87b 100644
--- a/nova/tests/baremetal/test_virtual_power_driver.py
+++ b/nova/tests/baremetal/test_virtual_power_driver.py
@@ -19,16 +19,14 @@
"""Tests for baremetal virtual power driver."""
import mox
+from oslo.config import cfg
from nova import exception
-from nova.openstack.common import cfg
-from nova import utils as nutils
-
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils
-
+from nova import utils as nutils
from nova.virt.baremetal import db
from nova.virt.baremetal import virtual_power_driver
import nova.virt.powervm.common as connection
diff --git a/nova/tests/baremetal/test_volume_driver.py b/nova/tests/baremetal/test_volume_driver.py
index c83277516..24dadac94 100644
--- a/nova/tests/baremetal/test_volume_driver.py
+++ b/nova/tests/baremetal/test_volume_driver.py
@@ -15,9 +15,9 @@
"""Tests for baremetal volume driver."""
-from nova.openstack.common import cfg
-from nova import test
+from oslo.config import cfg
+from nova import test
from nova.virt.baremetal import volume_driver
CONF = cfg.CONF
diff --git a/nova/tests/cells/fakes.py b/nova/tests/cells/fakes.py
index e996cbe13..c200d3dba 100644
--- a/nova/tests/cells/fakes.py
+++ b/nova/tests/cells/fakes.py
@@ -16,6 +16,8 @@
Fakes For Cells tests.
"""
+from oslo.config import cfg
+
from nova.cells import driver
from nova.cells import manager as cells_manager
from nova.cells import messaging
@@ -23,7 +25,6 @@ from nova.cells import state as cells_state
import nova.db
from nova.db import base
from nova import exception
-from nova.openstack.common import cfg
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
index 1ebbc407d..d53fdbb7f 100644
--- a/nova/tests/cells/test_cells_manager.py
+++ b/nova/tests/cells/test_cells_manager.py
@@ -18,10 +18,11 @@ Tests For CellsManager
import copy
import datetime
+from oslo.config import cfg
+
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index 811ad17fd..30adfdcd7 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -14,17 +14,18 @@
"""
Tests For Cells Messaging module
"""
+
+from oslo.config import cfg
+
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
-
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('allowed_rpc_exception_modules',
diff --git a/nova/tests/cells/test_cells_rpc_driver.py b/nova/tests/cells/test_cells_rpc_driver.py
index a44fe9376..e5e847c12 100644
--- a/nova/tests/cells/test_cells_rpc_driver.py
+++ b/nova/tests/cells/test_cells_rpc_driver.py
@@ -16,10 +16,11 @@
Tests For Cells RPC Communication Driver
"""
+from oslo.config import cfg
+
from nova.cells import messaging
from nova.cells import rpc_driver
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova import test
diff --git a/nova/tests/cells/test_cells_rpcapi.py b/nova/tests/cells/test_cells_rpcapi.py
index d19ce5b2b..f00b1b290 100644
--- a/nova/tests/cells/test_cells_rpcapi.py
+++ b/nova/tests/cells/test_cells_rpcapi.py
@@ -16,8 +16,9 @@
Tests For Cells RPCAPI
"""
+from oslo.config import cfg
+
from nova.cells import rpcapi as cells_rpcapi
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/cells/test_cells_scheduler.py b/nova/tests/cells/test_cells_scheduler.py
index 15b2571b5..c9e626385 100644
--- a/nova/tests/cells/test_cells_scheduler.py
+++ b/nova/tests/cells/test_cells_scheduler.py
@@ -17,11 +17,12 @@ Tests For CellsScheduler
"""
import time
+from oslo.config import cfg
+
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.cells import fakes
@@ -78,7 +79,8 @@ class CellsSchedulerTestCase(test.TestCase):
for instance_uuid in self.instance_uuids:
instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
self.assertEqual('meow', instance['hostname'])
- self.assertEqual('moo', instance['display_name'])
+ self.assertEqual('moo-%s' % instance['uuid'],
+ instance['display_name'])
self.assertEqual('fake_image_ref', instance['image_ref'])
def test_run_instance_selects_child_cell(self):
diff --git a/nova/tests/cert/test_rpcapi.py b/nova/tests/cert/test_rpcapi.py
index dce325354..b743ca1e5 100644
--- a/nova/tests/cert/test_rpcapi.py
+++ b/nova/tests/cert/test_rpcapi.py
@@ -18,9 +18,10 @@
Unit Tests for nova.cert.rpcapi
"""
+from oslo.config import cfg
+
from nova.cert import rpcapi as cert_rpcapi
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 6c58fffe0..e19470db5 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -27,6 +27,7 @@ import traceback
import uuid
import mox
+from oslo.config import cfg
import nova
from nova import compute
@@ -45,7 +46,7 @@ from nova import exception
from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
-from nova.openstack.common import cfg
+from nova.network.security_group import openstack_driver
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -62,13 +63,13 @@ from nova.tests.compute import fake_resource_tracker
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_instance_actions
from nova.tests import fake_network
+from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
from nova.virt import fake
from nova.volume import cinder
-
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -1661,7 +1662,8 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
requested_networks=None,
vpn=False, macs=macs,
- conductor_api=self.compute.conductor_api).AndReturn(
+ conductor_api=self.compute.conductor_api,
+ security_groups=[]).AndReturn(
fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
spectacular=True))
self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
@@ -1680,7 +1682,8 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
requested_networks=None,
vpn=False, macs=None,
- conductor_api=self.compute.conductor_api
+ conductor_api=self.compute.conductor_api,
+ security_groups=[]
).AndRaise(rpc_common.RemoteError())
fake_network.unset_stub_network_methods(self.stubs)
@@ -1786,13 +1789,11 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
- def _test_state_revert(self, operation, pre_task_state,
+ def _test_state_revert(self, instance, operation, pre_task_state,
post_task_state=None, kwargs=None):
if kwargs is None:
kwargs = {}
- instance = self._create_fake_instance()
-
# The API would have set task_state, so do that here to test
# that the state gets reverted on failure
db.instance_update(self.context, instance['uuid'],
@@ -1862,8 +1863,9 @@ class ComputeTestCase(BaseTestCase):
("resume_instance", task_states.RESUMING),
]
+ instance = self._create_fake_instance()
for operation in actions:
- self._test_state_revert(*operation)
+ self._test_state_revert(instance, *operation)
def _ensure_quota_reservations_committed(self):
"""Mock up commit of quota reservations."""
@@ -3551,7 +3553,7 @@ class ComputeTestCase(BaseTestCase):
instance).AndReturn(power_state.SHUTDOWN)
self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
self.compute._get_instance_volume_block_device_info(mox.IgnoreArg(),
- instance['uuid']).AndReturn('fake-bdm')
+ instance).AndReturn('fake-bdm')
self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
instance, mox.IgnoreArg(),
'fake-bdm').AndRaise(test.TestingException)
@@ -3649,7 +3651,9 @@ class ComputeAPITestCase(BaseTestCase):
super(ComputeAPITestCase, self).setUp()
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
- self.security_group_api = compute_api.SecurityGroupAPI()
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
+
self.compute_api = compute.API(
security_group_api=self.security_group_api)
self.fake_image = {
@@ -4986,6 +4990,7 @@ class ComputeAPITestCase(BaseTestCase):
orig_instance_type)
self.assertEqual(request_spec['instance_uuids'],
[instance['uuid']])
+ self.assertEqual(FAKE_IMAGE_REF, request_spec['image']['id'])
self.assertEqual(instance_properties['uuid'], instance['uuid'])
self.assertEqual(instance_properties['host'], 'host2')
# Ensure the instance passed to us has been updated with
@@ -5636,6 +5641,34 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
+ def test_multi_instance_display_name_template(self):
+ self.flags(multi_instance_display_name_template='%(name)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None,
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x')
+ self.assertEqual(refs[0]['hostname'], 'x')
+ self.assertEqual(refs[1]['display_name'], 'x')
+ self.assertEqual(refs[1]['hostname'], 'x')
+
+ self.flags(multi_instance_display_name_template='%(name)s-%(count)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None,
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x-1')
+ self.assertEqual(refs[0]['hostname'], 'x-1')
+ self.assertEqual(refs[1]['display_name'], 'x-2')
+ self.assertEqual(refs[1]['hostname'], 'x-2')
+
+ self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None,
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid'])
+ self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid'])
+ self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid'])
+ self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid'])
+
def test_instance_architecture(self):
# Test the instance architecture.
i_ref = self._create_fake_instance()
@@ -5827,6 +5860,41 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_attach_interface(self):
+ instance = {
+ 'image_ref': 'foo',
+ }
+ self.mox.StubOutWithMock(compute_manager, '_get_image_meta')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'allocate_port_for_instance')
+ nwinfo = network_model.NetworkInfo()
+ nwinfo.append(fake_network_cache_model.new_vif())
+ network_id = nwinfo[0]['network']['id']
+ port_id = nwinfo[0]['id']
+ req_ip = '1.2.3.4'
+ self.compute.network_api.allocate_port_for_instance(
+ self.context, instance, port_id, network_id, req_ip,
+ self.compute.conductor_api).AndReturn(nwinfo)
+ compute_manager._get_image_meta(self.context, instance['image_ref'])
+ self.mox.ReplayAll()
+ network, mapping = self.compute.attach_interface(self.context,
+ instance,
+ network_id,
+ port_id,
+ req_ip)
+ self.assertEqual(network['id'], network_id)
+ return nwinfo, port_id
+
+ def test_detach_interface(self):
+ nwinfo, port_id = self.test_attach_interface()
+ self.stubs.Set(self.compute.network_api, 'get_instance_nw_info',
+ lambda *a, **k: nwinfo)
+ self.stubs.Set(self.compute.network_api,
+ 'deallocate_port_for_instance',
+ lambda a, b, c, d: [])
+ self.compute.detach_interface(self.context, {}, port_id)
+ self.assertEqual(self.compute.driver._interfaces, {})
+
def test_attach_volume(self):
# Ensure instance can be soft rebooted.
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index 9b865014c..0f96e657d 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -19,6 +19,8 @@
import string
+from oslo.config import cfg
+
from nova.compute import instance_types
from nova.compute import utils as compute_utils
from nova import context
@@ -26,7 +28,6 @@ from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py
index 27ee7aaba..1a967c5e4 100644
--- a/nova/tests/compute/test_multiple_nodes.py
+++ b/nova/tests/compute/test_multiple_nodes.py
@@ -16,9 +16,10 @@
# under the License.
"""Tests for compute service with multiple compute nodes."""
+from oslo.config import cfg
+
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova import test
from nova.virt import fake
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index f1d0d1f0c..84dbc69fc 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -19,13 +19,14 @@
import uuid
+from oslo.config import cfg
+
from nova.compute import instance_types
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index b81e049bf..a78a13883 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -18,10 +18,11 @@
Unit Tests for nova.compute.rpcapi
"""
+from oslo.config import cfg
+
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova import db
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/conf_fixture.py b/nova/tests/conf_fixture.py
index 230f70a1b..e2ea18f98 100644
--- a/nova/tests/conf_fixture.py
+++ b/nova/tests/conf_fixture.py
@@ -17,10 +17,10 @@
# under the License.
import fixtures
+from oslo.config import cfg
from nova import config
from nova import ipv6
-from nova.openstack.common import cfg
from nova import paths
from nova.tests import utils
diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py
index 8c2e603aa..0f95f3d95 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/console/test_console.py
@@ -18,12 +18,13 @@
"""Tests For Console proxy."""
+from oslo.config import cfg
+
from nova.console import api as console_api
from nova.console import rpcapi as console_rpcapi
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/console/test_rpcapi.py b/nova/tests/console/test_rpcapi.py
index 8d9d0514f..298c59c4e 100644
--- a/nova/tests/console/test_rpcapi.py
+++ b/nova/tests/console/test_rpcapi.py
@@ -18,9 +18,10 @@
Unit Tests for nova.console.rpcapi
"""
+from oslo.config import cfg
+
from nova.console import rpcapi as console_rpcapi
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/consoleauth/test_rpcapi.py b/nova/tests/consoleauth/test_rpcapi.py
index 264c4e10b..15af5fdcf 100644
--- a/nova/tests/consoleauth/test_rpcapi.py
+++ b/nova/tests/consoleauth/test_rpcapi.py
@@ -18,9 +18,10 @@
Unit Tests for nova.consoleauth.rpcapi
"""
+from oslo.config import cfg
+
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index b14f248e6..d77e189df 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -421,13 +421,6 @@ def stub_out_db_instance_api(stubs, injected=True):
return inst_type
return None
- def fake_network_get_by_instance(context, instance_id):
- # Even instance numbers are on vlan networks
- if instance_id % 2 == 0:
- return FakeModel(vlan_network_fields)
- else:
- return FakeModel(flat_network_fields)
-
def fake_network_get_all_by_instance(context, instance_id):
# Even instance numbers are on vlan networks
if instance_id % 2 == 0:
@@ -438,8 +431,7 @@ def stub_out_db_instance_api(stubs, injected=True):
def fake_fixed_ip_get_by_instance(context, instance_id):
return [FakeModel(fixed_ip_fields)]
- funcs = [fake_network_get_by_instance,
- fake_network_get_all_by_instance,
+ funcs = [fake_network_get_all_by_instance,
fake_instance_type_get_all,
fake_instance_type_get_by_name,
fake_instance_type_get,
diff --git a/nova/tests/fake_imagebackend.py b/nova/tests/fake_imagebackend.py
index c284a5042..48426505e 100644
--- a/nova/tests/fake_imagebackend.py
+++ b/nova/tests/fake_imagebackend.py
@@ -41,7 +41,7 @@ class Backend(object):
pass
def libvirt_info(self, disk_bus, disk_dev, device_type,
- cache_mode):
+ cache_mode, extra_specs):
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
info.source_device = device_type
diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py
index 285a4b7e3..31b1af231 100644
--- a/nova/tests/fake_libvirt_utils.py
+++ b/nova/tests/fake_libvirt_utils.py
@@ -17,7 +17,9 @@
import os
import StringIO
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
+from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
@@ -34,6 +36,60 @@ def get_iscsi_initiator():
return "fake.initiator.iqn"
+def get_fc_hbas():
+ return [{'ClassDevice': 'host1',
+ 'ClassDevicePath': '/sys/devices/pci0000:00/0000:00:03.0'
+ '/0000:05:00.2/host1/fc_host/host1',
+ 'dev_loss_tmo': '30',
+ 'fabric_name': '0x1000000533f55566',
+ 'issue_lip': '<store method only>',
+ 'max_npiv_vports': '255',
+ 'maxframe_size': '2048 bytes',
+ 'node_name': '0x200010604b019419',
+ 'npiv_vports_inuse': '0',
+ 'port_id': '0x680409',
+ 'port_name': '0x100010604b019419',
+ 'port_state': 'Online',
+ 'port_type': 'NPort (fabric via point-to-point)',
+ 'speed': '10 Gbit',
+ 'supported_classes': 'Class 3',
+ 'supported_speeds': '10 Gbit',
+ 'symbolic_name': 'Emulex 554M FV4.0.493.0 DV8.3.27',
+ 'tgtid_bind_type': 'wwpn (World Wide Port Name)',
+ 'uevent': None,
+ 'vport_create': '<store method only>',
+ 'vport_delete': '<store method only>'}]
+
+
+def get_fc_hbas_info():
+ hbas = get_fc_hbas()
+ info = [{'port_name': hbas[0]['port_name'].replace('0x', ''),
+ 'node_name': hbas[0]['node_name'].replace('0x', ''),
+ 'host_device': hbas[0]['ClassDevice'],
+ 'device_path': hbas[0]['ClassDevicePath']}]
+ return info
+
+
+def get_fc_wwpns():
+ hbas = get_fc_hbas()
+ wwpns = []
+ for hba in hbas:
+ wwpn = hba['port_name'].replace('0x', '')
+ wwpns.append(wwpn)
+
+ return wwpns
+
+
+def get_fc_wwnns():
+ hbas = get_fc_hbas()
+ wwnns = []
+ for hba in hbas:
+ wwnn = hba['node_name'].replace('0x', '')
+ wwnns.append(wwnn)
+
+ return wwnns
+
+
def create_image(disk_format, path, size):
pass
@@ -141,9 +197,8 @@ def fetch_image(context, target, image_id, user_id, project_id):
pass
-def get_instance_path(instance):
- # TODO(mikal): we should really just call the real one here
- return os.path.join(CONF.instances_path, instance['name'])
+def get_instance_path(instance, forceold=False):
+ return libvirt_utils.get_instance_path(instance, forceold=forceold)
def pick_disk_driver_name(is_block_dev=False):
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index f384df272..42ce71ded 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
+
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
import nova.context
@@ -25,7 +27,6 @@ from nova.network import manager as network_manager
from nova.network import model as network_model
from nova.network import nova_ipam_lib
from nova.network import rpcapi as network_rpcapi
-from nova.openstack.common import cfg
from nova.virt.libvirt import config as libvirt_config
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index 3878df531..7f9fffddc 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -105,6 +105,7 @@ policy_data = """
"compute_extension:admin_actions:migrate": "",
"compute_extension:aggregates": "",
"compute_extension:agents": "",
+ "compute_extension:attach_interfaces": "",
"compute_extension:baremetal_nodes": "",
"compute_extension:cells": "",
"compute_extension:certificates": "",
@@ -143,6 +144,7 @@ policy_data = """
"compute_extension:hide_server_addresses": "",
"compute_extension:hosts": "",
"compute_extension:hypervisors": "",
+ "compute_extension:image_size": "",
"compute_extension:instance_actions": "",
"compute_extension:instance_actions:events": "is_admin:True",
"compute_extension:instance_usage_audit_log": "",
@@ -156,6 +158,7 @@ policy_data = """
"compute_extension:quotas:update": "",
"compute_extension:quota_classes": "",
"compute_extension:rescue": "",
+ "compute_extension:security_group_default_rules": "",
"compute_extension:security_groups": "",
"compute_extension:server_diagnostics": "",
"compute_extension:server_password": "",
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index 0d8a502a5..607f1444d 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -16,8 +16,9 @@
import uuid
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/tests/fakelibvirt.py b/nova/tests/fakelibvirt.py
index 6abe7771c..69a4e677e 100644
--- a/nova/tests/fakelibvirt.py
+++ b/nova/tests/fakelibvirt.py
@@ -70,6 +70,17 @@ VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_XML_SECURE = 1
+VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0
+
+VIR_DOMAIN_EVENT_DEFINED = 0
+VIR_DOMAIN_EVENT_UNDEFINED = 1
+VIR_DOMAIN_EVENT_STARTED = 2
+VIR_DOMAIN_EVENT_SUSPENDED = 3
+VIR_DOMAIN_EVENT_RESUMED = 4
+VIR_DOMAIN_EVENT_STOPPED = 5
+VIR_DOMAIN_EVENT_SHUTDOWN = 6
+VIR_DOMAIN_EVENT_PMSUSPENDED = 7
+
VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1
VIR_DOMAIN_AFFECT_CURRENT = 0
@@ -506,6 +517,7 @@ class Connection(object):
self._running_vms = {}
self._id_counter = 1 # libvirt reserves 0 for the hypervisor.
self._nwfilters = {}
+ self._event_callbacks = {}
self.fakeLibVersion = version
self.fakeVersion = version
@@ -517,6 +529,7 @@ class Connection(object):
def _mark_running(self, dom):
self._running_vms[self._id_counter] = dom
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
self._id_counter += 1
def _mark_not_running(self, dom):
@@ -528,10 +541,13 @@ class Connection(object):
for (k, v) in self._running_vms.iteritems():
if v == dom:
del self._running_vms[k]
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
return
def _undefine(self, dom):
del self._vms[dom.name()]
+ if not dom._transient:
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)
def getInfo(self):
return [node_arch,
@@ -563,14 +579,25 @@ class Connection(object):
'name "%s"' % name,
VIR_ERR_NO_DOMAIN, VIR_FROM_QEMU)
+ def _emit_lifecycle(self, dom, event, detail):
+ if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
+ return
+
+ cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
+ callback = cbinfo[0]
+ opaque = cbinfo[1]
+ callback(self, dom, event, detail, opaque)
+
def defineXML(self, xml):
dom = Domain(connection=self, running=False, transient=False, xml=xml)
self._vms[dom.name()] = dom
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
return dom
def createXML(self, xml, flags):
dom = Domain(connection=self, running=True, transient=True, xml=xml)
self._vms[dom.name()] = dom
+ self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
return dom
def getType(self):
@@ -586,6 +613,9 @@ class Connection(object):
def getHostname(self):
return 'compute1'
+ def domainEventRegisterAny(self, dom, eventid, callback, opaque):
+ self._event_callbacks[eventid] = [callback, opaque]
+
def getCapabilities(self):
return '''<capabilities>
<host>
@@ -875,6 +905,14 @@ def openAuth(uri, auth, flags):
return Connection(uri, readonly=False)
+def virEventRunDefaultImpl():
+ time.sleep(1)
+
+
+def virEventRegisterDefaultImpl():
+ pass
+
+
virDomain = Domain
diff --git a/nova/tests/hyperv/db_fakes.py b/nova/tests/hyperv/db_fakes.py
index e384e909a..7169edf8d 100644
--- a/nova/tests/hyperv/db_fakes.py
+++ b/nova/tests/hyperv/db_fakes.py
@@ -153,18 +153,6 @@ def stub_out_db_instance_api(stubs):
}
return FakeModel(base_options)
- def fake_network_get_by_instance(context, instance_id):
- """Stubs out the db.network_get_by_instance method."""
-
- fields = {
- 'bridge': 'vmnet0',
- 'netmask': '255.255.255.0',
- 'gateway': '10.10.10.1',
- 'broadcast': '10.10.10.255',
- 'dns1': 'fake',
- 'vlan': 100}
- return FakeModel(fields)
-
def fake_instance_type_get_all(context, inactive=0, filters=None):
return INSTANCE_TYPES.values()
@@ -175,7 +163,6 @@ def stub_out_db_instance_api(stubs):
return {}
stubs.Set(db, 'instance_create', fake_instance_create)
- stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
stubs.Set(db, 'block_device_mapping_get_all_by_instance',
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index 78cd667e4..06f2f5147 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -22,12 +22,12 @@ import copy
import datetime
import uuid
+from oslo.config import cfg
+
from nova import exception
import nova.image.glance
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-
CONF = cfg.CONF
CONF.import_opt('null_kernel', 'nova.compute.api')
LOG = logging.getLogger(__name__)
@@ -52,6 +52,7 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
+ 'size': '25165824',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64'}}
@@ -66,6 +67,7 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
+ 'size': '58145823',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
@@ -79,6 +81,7 @@ class _FakeImageService(object):
'is_public': True,
'container_format': None,
'disk_format': None,
+ 'size': '83594576',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
@@ -92,6 +95,7 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
+ 'size': '84035174',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel}}
@@ -105,6 +109,7 @@ class _FakeImageService(object):
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
+ 'size': '26360814',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
@@ -119,6 +124,7 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
+ 'size': '49163826',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
@@ -134,6 +140,7 @@ class _FakeImageService(object):
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
+ 'size': '74185822',
'properties': {'kernel_id': CONF.null_kernel,
'ramdisk_id': CONF.null_kernel,
'architecture': 'x86_64',
diff --git a/nova/tests/image/test_fake.py b/nova/tests/image/test_fake.py
index 614201b67..c63bb5389 100644
--- a/nova/tests/image/test_fake.py
+++ b/nova/tests/image/test_fake.py
@@ -41,7 +41,8 @@ class FakeImageServiceTestCase(test.TestCase):
self.assertEquals(keys, set(['id', 'name', 'created_at',
'updated_at', 'deleted_at', 'deleted',
'status', 'is_public', 'properties',
- 'disk_format', 'container_format']))
+ 'disk_format', 'container_format',
+ 'size']))
self.assertTrue(isinstance(image['created_at'], datetime.datetime))
self.assertTrue(isinstance(image['updated_at'], datetime.datetime))
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index fb26fa4f1..a7ed4c409 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -24,11 +24,11 @@ import tempfile
import time
import glanceclient.exc
+from oslo.config import cfg
from nova import context
from nova import exception
from nova.image import glance
-from nova.openstack.common import cfg
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
new file mode 100644
index 000000000..f5f470cbc
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
@@ -0,0 +1,34 @@
+{
+ "image": {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(image_id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/%(image_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/%(image_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/%(image_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
new file mode 100644
index 000000000..e36ddc76c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
@@ -0,0 +1,12 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<image xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="%(image_id)s">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/%(image_id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/%(image_id)s" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/%(image_id)s" type="application/vnd.openstack.image" rel="alternate"/>
+</image>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
new file mode 100644
index 000000000..a29172bf4
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
@@ -0,0 +1,219 @@
+{
+ "images": [
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "True",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage7",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage6",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "ramdisk_id": null
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "bookmark"
+ },
+ {
+ "href": "%(glance_host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "kernel_id": "nokernel",
+ "ramdisk_id": "nokernel"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
new file mode 100644
index 000000000..4c1e4c4be
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
@@ -0,0 +1,71 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<images xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image xmlns:OS-EXT-IMG-SIZE="http://docs.openstack.org/compute/ext/image_size/api/v1.1" status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage7" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">True</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="155d900f-4e14-4e4c-a73d-069cbf4541e6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/155d900f-4e14-4e4c-a73d-069cbf4541e6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="a2459075-d96c-40d5-893e-577ff92e721c">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/a2459075-d96c-40d5-893e-577ff92e721c" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage6" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="a440c04b-79fa-479c-bed1-0b816eaec379">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="auto_disk_config">False</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ <meta key="architecture">x86_64</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/a440c04b-79fa-479c-bed1-0b816eaec379" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="c905cedb-7281-47e4-8a62-f26bc5fc4c77">
+ <metadata>
+ <meta key="kernel_id">155d900f-4e14-4e4c-a73d-069cbf4541e6</meta>
+ <meta key="ramdisk_id">None</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="cedef40a-ed67-4d10-800e-17455edce175">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/cedef40a-ed67-4d10-800e-17455edce175" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+ <image status="ACTIVE" updated="2011-01-01T01:02:03Z" name="fakeimage123456" created="2011-01-01T01:02:03Z" minDisk="0" progress="100" minRam="0" OS-EXT-IMG-SIZE:size="%(int)s" id="76fa36fc-c930-4bf3-8c8a-ea2a2420deb6">
+ <metadata>
+ <meta key="kernel_id">nokernel</meta>
+ <meta key="ramdisk_id">nokernel</meta>
+ </metadata>
+ <atom:link href="%(host)s/v2/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="self"/>
+ <atom:link href="%(host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" rel="bookmark"/>
+ <atom:link href="%(glance_host)s/openstack/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6" type="application/vnd.openstack.image" rel="alternate"/>
+ </image>
+</images>
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 17914de42..1faf7f480 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -33,6 +33,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "OS-EXT-IMG-SIZE",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ImageSize",
+ "namespace": "http://docs.openstack.org/compute/ext/image_size/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "OS-EXT-SRV-ATTR",
"description": "%(text)s",
"links": [],
@@ -97,6 +105,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-attach-interfaces",
+ "description": "Attach interface support.",
+ "links": [],
+ "name": "AttachInterfaces",
+ "namespace": "http://docs.openstack.org/compute/ext/interfaces/api/v1.1",
+ "updated": "2012-07-22T00:00:00+00:00"
+ },
+ {
"alias": "os-availability-zone",
"description": "%(text)s",
"links": [],
@@ -393,6 +409,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-security-group-default-rules",
+ "description": "%(text)s",
+ "links": [],
+ "name": "SecurityGroupDefaultRules",
+ "namespace": "http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-security-groups",
"description": "%(text)s",
"links": [],
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index 4492ed3aa..0aefc123f 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -12,6 +12,9 @@
<extension alias="OS-EXT-IPS" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" name="ExtendedIps">
<description>%(text)s</description>
</extension>
+ <extension alias="OS-EXT-IMG-SIZE" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/image_size/api/v1.1" name="ImageSize">
+ <description>%(text)s</description>
+ </extension>
<extension alias="OS-EXT-SRV-ATTR" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedServerAttributes">
<description>%(text)s</description>
</extension>
@@ -33,6 +36,9 @@
<extension alias="os-aggregates" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/aggregates/api/v1.1" name="Aggregates">
<description>%(text)s</description>
</extension>
+ <extension alias="os-attach-interfaces" updated="2012-07-22T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/interfaces/api/v1.1" name="AttachInterfaces">
+ <description>Attach interface support.</description>
+ </extension>
<extension alias="os-availability-zone" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone">
<description>%(text)s</description>
</extension>
@@ -147,6 +153,9 @@
<extension alias="os-rescue" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/rescue/api/v1.1" name="Rescue">
<description>%(text)s</description>
</extension>
+ <extension alias="os-security-group-default-rules" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1" name="SecurityGroupDefaultRules">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-security-groups" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/securitygroups/api/v1.1" name="SecurityGroups">
<description>%(text)s</description>
</extension>
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
new file mode 100644
index 000000000..11dcf6437
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "interfaceAttachment": {
+ "port_id": "ce531f90-199f-48c0-816c-13e38010b442"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl
new file mode 100644
index 000000000..75e9b97c8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachment>
+ <port_id>%(port_id)s</port_id>
+</interfaceAttachment>
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
new file mode 100644
index 000000000..d882cdc61
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
@@ -0,0 +1,12 @@
+{
+ "interfaceAttachment": {
+ "fixed_ips": [{
+ "subnet_id": "%(subnet_id)s",
+ "ip_address": "%(ip_address)s"
+ }],
+ "mac_addr": "fa:16:3e:4c:2c:30",
+ "net_id": "%(net_id)s",
+ "port_id": "%(port_id)s",
+ "port_state": "%(port_state)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl
new file mode 100644
index 000000000..b391e5973
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl
@@ -0,0 +1,12 @@
+<interfaceAttachment>
+ <net_id>%(net_id)s</net_id>
+ <port_id>%(port_id)s</port_id>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>%(subnet_id)s</subnet_id>
+ <ip_address>%(ip_address)s</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_state>%(port_state)s</port_state>
+ <mac_addr>%(mac_addr)s</mac_addr>
+</interfaceAttachment>
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
new file mode 100644
index 000000000..47dcf2dc6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "interfaceAttachments": [
+ {
+ "port_state": "%(port_state)s",
+ "fixed_ips": [
+ {
+ "subnet_id": "%(subnet_id)s",
+ "ip_address": "%(ip_address)s"
+ }
+ ],
+ "net_id": "%(net_id)s",
+ "port_id": "%(port_id)s",
+ "mac_addr": "%(mac_addr)s"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl
new file mode 100644
index 000000000..f3262e948
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachments>
+ <interfaceAttachment>
+ <port_state>%(port_state)s</port_state>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>%(subnet_id)s</subnet_id>
+ <ip_address>%(ip_address)s</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_id>%(port_id)s</port_id>
+ <net_id>%(net_id)s</net_id>
+ <mac_addr>%(mac_addr)s</mac_addr>
+ </interfaceAttachment>
+</interfaceAttachments>
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
new file mode 100644
index 000000000..3333bb499
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
@@ -0,0 +1,14 @@
+{
+ "interfaceAttachment": {
+ "port_state": "%(port_state)s",
+ "fixed_ips": [
+ {
+ "subnet_id": "%(subnet_id)s",
+ "ip_address": "%(ip_address)s"
+ }
+ ],
+ "net_id": "%(net_id)s",
+ "port_id": "%(port_id)s",
+ "mac_addr": "%(mac_addr)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl
new file mode 100644
index 000000000..a3393448d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<interfaceAttachment>
+ <port_state>%(port_state)s</port_state>
+ <fixed_ips>
+ <fixed_ip>
+ <subnet_id>%(subnet_id)s</subnet_id>
+ <ip_address>%(ip_address)s</ip_address>
+ </fixed_ip>
+ </fixed_ips>
+ <port_id>%(port_id)s</port_id>
+ <net_id>%(net_id)s</net_id>
+ <mac_addr>%(mac_addr)s</mac_addr>
+</interfaceAttachment>
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
new file mode 100644
index 000000000..88eb4ad86
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
@@ -0,0 +1,26 @@
+{
+ "hypervisors": [
+ {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": null,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1,
+ "id": 1,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
new file mode 100644
index 000000000..f0c20662d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_hostname="fake-mini" memory_mb_used="512" memory_mb="8192" current_workload="0" vcpus="1" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_version="1" disk_available_least="None" local_gb="1028" free_ram_mb="7680" id="1">
+ <service host="%(host_name)s" id="2"/>
+ </hypervisor>
+</hypervisors>
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
new file mode 100644
index 000000000..8d9402127
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl
new file mode 100644
index 000000000..6b7d9d7ca
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini"/>
+</hypervisors>
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
new file mode 100644
index 000000000..8d9402127
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl
new file mode 100644
index 000000000..6b7d9d7ca
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini"/>
+</hypervisors>
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
new file mode 100644
index 000000000..8d9402127
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "hypervisors": [
+ {
+ "hypervisor_hostname": "fake-mini",
+ "id": 1
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl
new file mode 100644
index 000000000..7782732ba
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisors>
+ <hypervisor id="1" hypervisor_hostname="fake-mini">
+ <servers/>
+ </hypervisor>
+</hypervisors>
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
new file mode 100644
index 000000000..4eaded8d7
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": null,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1,
+ "id": %(hypervisor_id)s,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
new file mode 100644
index 000000000..336f23be2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_hostname="fake-mini" memory_mb_used="512" memory_mb="8192" current_workload="0" vcpus="1" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_version="1" disk_available_least="None" local_gb="1028" free_ram_mb="7680" id="%(hypervisor_id)s">
+ <service host="%(host_name)s" id="2"/>
+</hypervisor>
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
new file mode 100644
index 000000000..2cfb51e70
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "hypervisor_statistics": {
+ "count": 1,
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl
new file mode 100644
index 000000000..5d10411e3
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor_statistics count="1" vcpus_used="0" local_gb_used="0" memory_mb="8192" current_workload="0" vcpus="1" running_vms="0" free_disk_gb="1028" disk_available_least="0" local_gb="1028" free_ram_mb="7680" memory_mb_used="512"/> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
new file mode 100644
index 000000000..8a36c65f2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "hypervisor": {
+ "hypervisor_hostname": "fake-mini",
+ "id": %(hypervisor_id)s,
+ "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl
new file mode 100644
index 000000000..04219f5b5
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<hypervisor uptime=" 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" id="%(hypervisor_id)s" hypervisor_hostname="fake-mini"/>
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
new file mode 100644
index 000000000..8836d0eec
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "security_group_default_rule": {
+ "ip_protocol": "TCP",
+ "from_port": "80",
+ "to_port": "80",
+ "cidr": "10.10.10.0/24"
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl
new file mode 100644
index 000000000..daee12290
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule>
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
new file mode 100644
index 000000000..ae6c62bfd
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_group_default_rule": {
+ "from_port": 80,
+ "id": 1,
+ "ip_protocol": "TCP",
+ "ip_range":{
+ "cidr": "10.10.10.0/24"
+ },
+ "to_port": 80
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl
new file mode 100644
index 000000000..9e700969f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule xmlns="http://docs.openstack.org/compute/api/v1.1" id="1">
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+</security_group_default_rule> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
new file mode 100644
index 000000000..c083640c3
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
@@ -0,0 +1,13 @@
+{
+ "security_group_default_rules": [
+ {
+ "from_port": 80,
+ "id": 1,
+ "ip_protocol": "TCP",
+ "ip_range": {
+ "cidr": "10.10.10.0/24"
+ },
+ "to_port": 80
+ }
+ ]
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl
new file mode 100644
index 000000000..f009bf80f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rules xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <security_group_default_rule id="1">
+ <ip_protocol>TCP</ip_protocol>
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+ </security_group_default_rule>
+</security_group_default_rules> \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
new file mode 100644
index 000000000..97b5259a1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "security_group_default_rule": {
+ "id": 1,
+ "from_port": 80,
+ "to_port": 80,
+ "ip_protocol": "TCP",
+ "ip_range": {
+ "cidr": "10.10.10.0/24"
+ }
+ }
+} \ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl
new file mode 100644
index 000000000..9181abd38
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl
@@ -0,0 +1,9 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<security_group_default_rule xmlns="http://docs.openstack.org/compute/api/v1.1" id="1">
+ <from_port>80</from_port>
+ <to_port>80</to_port>
+ <ip_protocol>TCP</ip_protocol>
+ <ip_range>
+ <cidr>10.10.10.0/24</cidr>
+ </ip_range>
+</security_group_default_rule> \ No newline at end of file
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 90e9a806e..e8fb2b6ba 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -23,9 +23,10 @@ import random
import string
import uuid
+from oslo.config import cfg
+
import nova.image.glance
-from nova.openstack.common import cfg
-from nova.openstack.common.log import logging
+from nova.openstack.common import log as logging
from nova import service
from nova import test
from nova.tests import fake_crypto
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index ae34765d9..db26b07ec 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -25,21 +25,22 @@ import uuid as uuid_lib
import coverage
from lxml import etree
+from oslo.config import cfg
from nova.api.metadata import password
from nova.api.openstack.compute.contrib import coverage_ext
from nova.api.openstack.compute.contrib import fping
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.cloudpipe import pipelib
+from nova.compute import api as compute_api
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.network import api as network_api
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
-from nova.openstack.common.log import logging
+from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
import nova.quota
from nova.scheduler import driver
@@ -266,7 +267,6 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
sample_data = "{}"
else:
sample_data = None
-
try:
response_result = self._verify_something(subs, expected,
response_data)
@@ -325,6 +325,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
'glance_host': self._get_glance_host(),
'compute_host': self.compute.host,
'text': text,
+ 'int': '[0-9]+',
}
def _get_response(self, url, method, body=None, strip_version=False):
@@ -383,7 +384,6 @@ class ApiSamplesTrap(ApiSampleTestBase):
# removed) soon.
do_not_approve_additions = []
do_not_approve_additions.append('os-create-server-ext')
- do_not_approve_additions.append('os-hypervisors')
do_not_approve_additions.append('os-volumes')
tests = self._get_extensions_tested()
@@ -1127,7 +1127,39 @@ class SecurityGroupsSampleJsonTest(ServersSampleBase):
subs, response)
-class SecurityGroupsSampleXmlTest(SecurityGroupsSampleJsonTest):
+class SecurityGroupsSampleXmlTest(ApiSampleTestBase):
+ ctype = 'xml'
+
+
+class SecurityGroupDefaultRulesSampleJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib'
+ '.security_group_default_rules'
+ '.Security_group_default_rules')
+
+ def test_security_group_default_rules_create(self):
+ response = self._do_post('os-security-group-default-rules',
+ 'security-group-default-rules-create-req',
+ {})
+ self.assertEqual(response.status, 200)
+ return self._verify_response(
+ 'security-group-default-rules-create-resp', {}, response)
+
+ def test_security_group_default_rules_list(self):
+ self.test_security_group_default_rules_create()
+ response = self._do_get('os-security-group-default-rules')
+ return self._verify_response('security-group-default-rules-list-resp',
+ {}, response)
+
+ def test_security_group_default_rules_show(self):
+ self.test_security_group_default_rules_create()
+ rule_id = '1'
+ response = self._do_get('os-security-group-default-rules/%s' % rule_id)
+ return self._verify_response('security-group-default-rules-show-resp',
+ {}, response)
+
+
+class SecurityGroupDefaultRulesSampleXmlTest(
+ SecurityGroupDefaultRulesSampleJsonTest):
ctype = 'xml'
@@ -2136,7 +2168,7 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
def fake_live_migration_dest_check(self, context, instance_ref, dest):
"""Skip live migration scheduler checks."""
- return
+ return dest
def fake_live_migration_common(self, context, instance_ref, dest):
"""Skip live migration scheduler checks."""
@@ -3174,6 +3206,30 @@ class InstanceActionsSampleXmlTest(InstanceActionsSampleJsonTest):
ctype = 'xml'
+class ImageSizeSampleJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".image_size.Image_size")
+
+ def test_show(self):
+ # Get api sample of one single image details request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ return self._verify_response('image-get-resp', subs, response)
+
+ def test_detail(self):
+ # Get api sample of all images details request.
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ return self._verify_response('images-details-get-resp', subs, response)
+
+
+class ImageSizeSampleXmlTests(ImageSizeSampleJsonTests):
+ ctype = 'xml'
+
+
class ConfigDriveSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.config_drive."
"Config_drive")
@@ -3301,4 +3357,235 @@ class FlavorAccessSampleJsonTests(ApiSampleTestBase):
class FlavorAccessSampleXmlTests(FlavorAccessSampleJsonTests):
+ ctype = 'xml'
+
+
+class HypervisorsSampleJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
+ "Hypervisors")
+
+ def test_hypervisors_list(self):
+ response = self._do_get('os-hypervisors')
+ self.assertEqual(response.status, 200)
+ return self._verify_response('hypervisors-list-resp',
+ {}, response)
+
+ def test_hypervisors_search(self):
+ response = self._do_get('os-hypervisors/fake/search')
+ self.assertEqual(response.status, 200)
+ return self._verify_response('hypervisors-search-resp',
+ {}, response)
+
+ def test_hypervisors_servers(self):
+ response = self._do_get('os-hypervisors/fake/servers')
+ self.assertEqual(response.status, 200)
+ return self._verify_response('hypervisors-servers-resp',
+ {}, response)
+
+ def test_hypervisors_show(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ return self._verify_response('hypervisors-show-resp',
+ subs, response)
+
+ def test_hypervisors_statistics(self):
+ response = self._do_get('os-hypervisors/statistics')
+ self.assertEqual(response.status, 200)
+ return self._verify_response('hypervisors-statistics-resp',
+ {}, response)
+
+ def test_hypervisors_uptime(self):
+ def fake_get_host_uptime(self, context, hyp):
+ return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+
+ self.stubs.Set(compute_api.HostAPI,
+ 'get_host_uptime', fake_get_host_uptime)
+ hypervisor_id = 1
+ response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'hypervisor_id': hypervisor_id,
+ }
+ return self._verify_response('hypervisors-uptime-resp',
+ subs, response)
+
+
+class HypervisorsSampleXmlTests(HypervisorsSampleJsonTests):
ctype = "xml"
+
+
+class AttachInterfacesSampleJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib.attach_interfaces.'
+ 'Attach_interfaces')
+
+ def setUp(self):
+ super(AttachInterfacesSampleJsonTest, self).setUp()
+
+ def fake_list_ports(self, *args, **kwargs):
+ uuid = kwargs.get('device_id', None)
+ if not uuid:
+ raise InstanceNotFound(instance_id=None)
+ port_data = {
+ "id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": uuid,
+ }
+ ports = {'ports': [port_data]}
+ return ports
+
+ def fake_show_port(self, context, port_id=None):
+ if not port_id:
+ raise PortNotFound(port_id=None)
+ port_data = {
+ "id": port_id,
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
+ }
+ port = {'port': port_data}
+ return port
+
+ def fake_attach_interface(self, context, instance,
+ network_id, port_id,
+ requested_ip='192.168.1.3'):
+ if not network_id:
+ network_id = "fake_net_uuid"
+ if not port_id:
+ port_id = "fake_port_uuid"
+ network_info = [
+ {
+ 'bridge': 'br-100',
+ 'id': network_id,
+ 'cidr': '192.168.1.0/24',
+ 'vlan': '101',
+ 'injected': 'False',
+ 'multi_host': 'False',
+ 'bridge_interface': 'bridge_interface'
+ },
+ {
+ "vif_uuid": port_id,
+ "network_id": network_id,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": requested_ip,
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": instance['uuid'],
+ }
+ ]
+ return network_info
+
+ def fake_detach_interface(self, context, instance, port_id):
+ pass
+
+ self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
+ self.stubs.Set(network_api.API, 'show_port', fake_show_port)
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ self.flags(quantum_auth_strategy=None)
+ self.flags(quantum_url='http://anyhost/')
+ self.flags(quantum_url_timeout=30)
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['subnet_id'] = vanilla_regexes['uuid']
+ subs['net_id'] = vanilla_regexes['uuid']
+ subs['port_id'] = vanilla_regexes['uuid']
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ subs['ip_address'] = vanilla_regexes['ip']
+ return subs
+
+ def test_list_interfaces(self):
+ instance_uuid = self._post_server()
+ response = self._do_get('servers/%s/os-interface' % instance_uuid)
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-list-resp', subs, response)
+
+ def _stub_show_for_instance(self, instance_uuid, port_id):
+ show_port = network_api.API().show_port(None, port_id)
+ show_port['port']['device_id'] = instance_uuid
+ self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
+
+ def test_show_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ self._stub_show_for_instance(instance_uuid, port_id)
+ response = self._do_get('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ self.assertEqual(response.status, 200)
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': port_id,
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-show-resp', subs, response)
+
+ def test_create_interfaces(self, instance_uuid=None):
+ if instance_uuid is None:
+ instance_uuid = self._post_server()
+ subs = {
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'ip_address': '192.168.1.3',
+ 'port_state': 'ACTIVE',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ }
+ self._stub_show_for_instance(instance_uuid, subs['port_id'])
+ response = self._do_post('servers/%s/os-interface' % instance_uuid,
+ 'attach-interfaces-create-req', subs)
+ self.assertEqual(response.status, 200)
+ subs.update(self._get_regexes())
+ self._verify_response('attach-interfaces-create-resp',
+ subs, response)
+
+ def test_delete_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ response = self._do_delete('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), '')
+
+
+class AttachInterfacesSampleXmlTest(AttachInterfacesSampleJsonTest):
+ ctype = 'xml'
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
index ca5ff8374..e1dc96e93 100644
--- a/nova/tests/integrated/test_extensions.py
+++ b/nova/tests/integrated/test_extensions.py
@@ -15,9 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
+
# Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.openstack.common import cfg
-from nova.openstack.common.log import logging
+from nova.openstack.common import log as logging
from nova.tests.integrated import integrated_helpers
CONF = cfg.CONF
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
index 80b40e616..8f1601ee0 100644
--- a/nova/tests/integrated/test_login.py
+++ b/nova/tests/integrated/test_login.py
@@ -16,7 +16,7 @@
# under the License.
-from nova.openstack.common.log import logging
+from nova.openstack.common import log as logging
from nova.tests.integrated import integrated_helpers
diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py
index ae4fcc32f..91e37116d 100644
--- a/nova/tests/integrated/test_multiprocess_api.py
+++ b/nova/tests/integrated/test_multiprocess_api.py
@@ -16,12 +16,13 @@
"""
Test multiprocess enabled API service.
"""
+import fixtures
import os
import signal
import time
import traceback
-from nova.openstack.common.log import logging
+from nova.openstack.common import log as logging
from nova import service
from nova.tests.integrated import integrated_helpers
@@ -94,8 +95,16 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
# Make sure all processes are stopped
os.kill(self.pid, signal.SIGTERM)
- # Make sure we reap our test process
- self._reap_test()
+ try:
+ # Make sure we reap our test process
+ self._reap_test()
+ except fixtures.TimeoutException:
+ # If the child gets stuck or is too slow in exiting
+ # after receiving the SIGTERM, gracefully handle the
+ # timeout exception and try harder to kill it. We need
+ # to do this otherwise the child process can hold up
+ # the test run
+ os.kill(self.pid, signal.SIGKILL)
super(MultiprocessWSGITest, self).tearDown()
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index b70db93f2..95c2da2a1 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -18,7 +18,7 @@
import time
import zlib
-from nova.openstack.common.log import logging
+from nova.openstack.common import log as logging
from nova.tests import fake_network
from nova.tests.integrated.api import client
from nova.tests.integrated import integrated_helpers
diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py
index 1c1dd1b06..d9f10c630 100644
--- a/nova/tests/integrated/test_xml.py
+++ b/nova/tests/integrated/test_xml.py
@@ -19,7 +19,7 @@ from lxml import etree
from nova.api.openstack import common
from nova.api.openstack import xmlutil
-from nova.openstack.common.log import logging
+from nova.openstack.common import log as logging
from nova.tests.integrated import integrated_helpers
diff --git a/nova/tests/matchers.py b/nova/tests/matchers.py
index be65da823..280b2842c 100644
--- a/nova/tests/matchers.py
+++ b/nova/tests/matchers.py
@@ -20,6 +20,8 @@
import pprint
+from testtools import content
+
from lxml import etree
@@ -226,8 +228,8 @@ class XMLMismatch(object):
def get_details(self):
return {
- 'expected': self.expected,
- 'actual': self.actual,
+ 'expected': content.text_content(self.expected),
+ 'actual': content.text_content(self.actual),
}
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index 50c98e78c..cba08d9ce 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -19,12 +19,12 @@ import calendar
import os
import mox
+from oslo.config import cfg
from nova import context
from nova import db
from nova.network import driver
from nova.network import linux_net
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index f5a1704ec..ba997ac9d 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -18,6 +18,7 @@
import fixtures
import mox
+from oslo.config import cfg
from nova import context
from nova import db
@@ -28,7 +29,6 @@ from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
-from nova.openstack.common import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -40,7 +40,6 @@ from nova.tests import fake_network
from nova.tests import matchers
from nova import utils
-
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -299,7 +298,7 @@ class FlatNetworkTestCase(test.TestCase):
[{'id': 0}]})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
- [{'id':0, 'name':'test'}]})
+ [{'id': 0, 'name': 'test'}]})
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
@@ -340,7 +339,7 @@ class FlatNetworkTestCase(test.TestCase):
[{'id': 0}]})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
- [{'id':0, 'name':'test'}]})
+ [{'id': 0, 'name': 'test'}]})
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
@@ -425,7 +424,7 @@ class FlatNetworkTestCase(test.TestCase):
[{'id': 0}]})
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
- [{'id':0, 'name':'test'}]})
+ [{'id': 0, 'name': 'test'}]})
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
@@ -456,6 +455,23 @@ class FlatNetworkTestCase(test.TestCase):
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
+ def test_allocate_floating_ip(self):
+ self.assertEqual(self.network.allocate_floating_ip(self.context,
+ 1, None), None)
+
+ def test_deallocate_floating_ip(self):
+ self.assertEqual(self.network.deallocate_floating_ip(self.context,
+ 1, None), None)
+
+ def test_associate_floating_ip(self):
+ self.assertEqual(self.network.associate_floating_ip(self.context,
+ None, None), None)
+
+ def test_disassociate_floating_ip(self):
+ self.assertEqual(self.network.disassociate_floating_ip(self.context,
+ None, None),
+ None)
+
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
diff --git a/nova/tests/network/test_network_info.py b/nova/tests/network/test_network_info.py
index 3e19a4461..d2ab6a746 100644
--- a/nova/tests/network/test_network_info.py
+++ b/nova/tests/network/test_network_info.py
@@ -327,7 +327,7 @@ class NetworkInfoTests(test.TestCase):
def test_create_model(self):
ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
- {'address':'bb:bb:bb:bb:bb:bb'})])
+ {'address': 'bb:bb:bb:bb:bb:bb'})])
self.assertEqual(ninfo.fixed_ips(),
[fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
@@ -338,13 +338,13 @@ class NetworkInfoTests(test.TestCase):
vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
ninfo = model.NetworkInfo([vif,
fake_network_cache_model.new_vif(
- {'address':'bb:bb:bb:bb:bb:bb'})])
+ {'address': 'bb:bb:bb:bb:bb:bb'})])
self.assertEqual(ninfo.floating_ips(), ['192.168.1.1'])
def test_hydrate(self):
ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
fake_network_cache_model.new_vif(
- {'address':'bb:bb:bb:bb:bb:bb'})])
+ {'address': 'bb:bb:bb:bb:bb:bb'})])
deserialized = model.NetworkInfo.hydrate(ninfo)
self.assertEqual(ninfo.fixed_ips(),
[fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index f3f306694..b535363a8 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -18,16 +18,15 @@
import uuid
import mox
+from oslo.config import cfg
+from quantumclient.v2_0 import client
from nova import context
from nova import exception
from nova.network import model
from nova.network import quantumv2
from nova.network.quantumv2 import api as quantumapi
-from nova.openstack.common import cfg
from nova import test
-from quantumclient.v2_0 import client
-
CONF = cfg.CONF
@@ -230,11 +229,10 @@ class TestQuantumv2(test.TestCase):
'router_id': 'router_id1'}
def tearDown(self):
- try:
- self.mox.UnsetStubs()
- self.mox.VerifyAll()
- finally:
- CONF.reset()
+ self.addCleanup(CONF.reset)
+ self.addCleanup(self.mox.VerifyAll)
+ self.addCleanup(self.mox.UnsetStubs)
+ self.addCleanup(self.stubs.UnsetAll)
super(TestQuantumv2, self).tearDown()
def _verify_nw_info(self, nw_inf, index=0):
@@ -354,9 +352,28 @@ class TestQuantumv2(test.TestCase):
self.assertEquals('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEquals(0, len(nw_inf[0]['network']['subnets']))
+ def test_refresh_quantum_extensions_cache(self):
+ api = quantumapi.API()
+ self.moxed_client.list_extensions().AndReturn(
+ {'extensions': [{'name': 'nvp-qos'}]})
+ self.mox.ReplayAll()
+ api._refresh_quantum_extensions_cache()
+ self.assertEquals({'nvp-qos': {'name': 'nvp-qos'}}, api.extensions)
+
+ def test_populate_quantum_extension_values_rxtx_factor(self):
+ api = quantumapi.API()
+ self.moxed_client.list_extensions().AndReturn(
+ {'extensions': [{'name': 'nvp-qos'}]})
+ self.mox.ReplayAll()
+ instance = {'instance_type': {'rxtx_factor': 1}}
+ port_req_body = {'port': {}}
+ api._populate_quantum_extension_values(instance, port_req_body)
+ self.assertEquals(port_req_body['port']['rxtx_factor'], 1)
+
def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
api = quantumapi.API()
self.mox.StubOutWithMock(api, 'get_instance_nw_info')
+ self.mox.StubOutWithMock(api, '_populate_quantum_extension_values')
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
ports = {}
@@ -428,6 +445,9 @@ class TestQuantumv2(test.TestCase):
if macs:
port_req_body['port']['mac_address'] = macs.pop()
res_port = {'port': {'id': 'fake'}}
+ api._populate_quantum_extension_values(
+ self.instance, port_req_body).AndReturn(None)
+
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn(res_port)
@@ -547,6 +567,7 @@ class TestQuantumv2(test.TestCase):
In this case, the code should delete the first created port.
"""
api = quantumapi.API()
+ self.mox.StubOutWithMock(api, '_populate_quantum_extension_values')
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
@@ -565,6 +586,9 @@ class TestQuantumv2(test.TestCase):
},
}
port = {'id': 'portid_' + network['id']}
+
+ api._populate_quantum_extension_values(
+ self.instance, port_req_body).AndReturn(None)
if index == 0:
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn({'port': port})
@@ -614,7 +638,9 @@ class TestQuantumv2(test.TestCase):
{'ports': port_data})
for port in port_data:
self.moxed_client.delete_port(port['id'])
+
self.mox.ReplayAll()
+
api = quantumapi.API()
api.deallocate_for_instance(self.context, self.instance)
@@ -626,6 +652,56 @@ class TestQuantumv2(test.TestCase):
# Test to deallocate in two ports env.
self._deallocate_for_instance(2)
+ def _test_deallocate_port_for_instance(self, number):
+ port_data = number == 1 and self.port_data1 or self.port_data2
+ self.moxed_client.delete_port(port_data[0]['id'])
+
+ nets = [port_data[0]['network_id']]
+ quantumv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_ports(
+ tenant_id=self.instance['project_id'],
+ device_id=self.instance['uuid']).AndReturn(
+ {'ports': port_data[1:]})
+ quantumv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_networks(
+ tenant_id=self.instance['project_id'],
+ shared=False).AndReturn(
+ {'networks': [self.nets2[1]]})
+ self.moxed_client.list_networks(shared=True).AndReturn(
+ {'networks': []})
+ for port in port_data[1:]:
+ self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
+
+ self.mox.ReplayAll()
+
+ api = quantumapi.API()
+ nwinfo = api.deallocate_port_for_instance(self.context, self.instance,
+ port_data[0]['id'])
+ self.assertEqual(len(nwinfo), len(port_data[1:]))
+ if len(port_data) > 1:
+ self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
+
+ def test_deallocate_port_for_instance_1(self):
+ # Test to deallocate the first and only port
+ self._test_deallocate_port_for_instance(1)
+
+ def test_deallocate_port_for_instance_2(self):
+ # Test to deallocate the first port of two
+ self._test_deallocate_port_for_instance(2)
+
+ def test_list_ports(self):
+ search_opts = {'parm': 'value'}
+ self.moxed_client.list_ports(**search_opts)
+ self.mox.ReplayAll()
+ quantumapi.API().list_ports(self.context, **search_opts)
+
+ def test_show_port(self):
+ self.moxed_client.show_port('foo')
+ self.mox.ReplayAll()
+ quantumapi.API().show_port(self.context, 'foo')
+
def test_validate_networks(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index 33663b8da..a161fe5e0 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -18,9 +18,10 @@
Unit Tests for nova.network.rpcapi
"""
+from oslo.config import cfg
+
from nova import context
from nova.network import rpcapi as network_rpcapi
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/policy_fixture.py b/nova/tests/policy_fixture.py
index 282a28b44..91813defd 100644
--- a/nova/tests/policy_fixture.py
+++ b/nova/tests/policy_fixture.py
@@ -15,8 +15,8 @@
import os
import fixtures
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common import policy as common_policy
import nova.policy
from nova.tests import fake_policy
diff --git a/nova/tests/scheduler/test_chance_scheduler.py b/nova/tests/scheduler/test_chance_scheduler.py
index dcbe86f75..0ee617044 100644
--- a/nova/tests/scheduler/test_chance_scheduler.py
+++ b/nova/tests/scheduler/test_chance_scheduler.py
@@ -165,3 +165,33 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.schedule_prep_resize(fake_context, {}, {}, {},
instance, {}, None)
self.assertEqual(info['called'], 0)
+
+ def test_select_hosts(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ fake_args = (1, 2, 3)
+ instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
+ instance1 = {'uuid': 'fake-uuid1'}
+ instance2 = {'uuid': 'fake-uuid2'}
+ request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
+ 'instance_properties': instance_opts}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'random')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ # instance 1
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
+ ['host1', 'host2', 'host3', 'host4'])
+ random.random().AndReturn(.5)
+
+ # instance 2
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(
+ ['host1', 'host2', 'host3', 'host4'])
+ random.random().AndReturn(.2)
+
+ self.mox.ReplayAll()
+ hosts = self.driver.select_hosts(ctxt, request_spec, {})
+ self.assertEquals(['host3', 'host1'], hosts)
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 4b07581fb..849f63f5d 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -656,3 +656,43 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEquals(1, len(hosts))
self.assertEquals(50, hosts[0].weight)
+
+ def test_select_hosts_happy_day(self):
+ """select_hosts is basically a wrapper around the _select() method.
+ Similar to the _select tests, this just does a happy path test to
+ ensure there is nothing glaringly wrong."""
+
+ self.next_weight = 1.0
+
+ selected_hosts = []
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ selected_hosts.append(host_state.host)
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ request_spec = {'num_instances': 10,
+ 'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1},
+ 'instance_properties': {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux'}}
+ self.mox.ReplayAll()
+ hosts = sched.select_hosts(fake_context, request_spec, {})
+ self.assertEquals(len(hosts), 10)
+ self.assertEquals(hosts, selected_hosts)
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index edd2e0d61..8fe998daa 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -16,11 +16,12 @@ Tests For Scheduler Host Filters.
"""
import httplib
+
+from oslo.config import cfg
import stubout
from nova import context
from nova import db
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.scheduler import filters
@@ -1226,7 +1227,7 @@ class HostFiltersTestCase(test.TestCase):
def test_trusted_filter_untrusted_and_untrusted_passes(self):
self.oat_data = {"hosts": [{"host_name": "host1",
"trust_lvl": "untrusted",
- "vtime":timeutils.isotime()}]}
+ "vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
filt_cls = self.class_map['TrustedFilter']()
extra_specs = {'trust:trusted_host': 'untrusted'}
@@ -1414,3 +1415,42 @@ class HostFiltersTestCase(test.TestCase):
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {'group_hosts': ['host1']}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_with_meta_passes(self):
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['AggregateMultiTenancyIsolation']()
+ aggr_meta = {'filter_tenant_id': 'my_tenantid'}
+ self._create_aggregate_with_host(name='fake1', metadata=aggr_meta,
+ hosts=['host1'])
+ filter_properties = {'context': self.context,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_fails(self):
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['AggregateMultiTenancyIsolation']()
+ aggr_meta = {'filter_tenant_id': 'other_tenantid'}
+ self._create_aggregate_with_host(name='fake1', metadata=aggr_meta,
+ hosts=['host1'])
+ filter_properties = {'context': self.context,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_no_meta_passes(self):
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['AggregateMultiTenancyIsolation']()
+ aggr_meta = {}
+ self._create_aggregate_with_host(name='fake1', metadata=aggr_meta,
+ hosts=['host1'])
+ filter_properties = {'context': self.context,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_least_cost.py b/nova/tests/scheduler/test_least_cost.py
index d159d8f4c..d920b7255 100644
--- a/nova/tests/scheduler/test_least_cost.py
+++ b/nova/tests/scheduler/test_least_cost.py
@@ -15,14 +15,15 @@
"""
Tests For Least Cost functions.
"""
+
+from oslo.config import cfg
+
from nova import context
-from nova.openstack.common import cfg
from nova.scheduler import weights
from nova.scheduler.weights import least_cost
from nova import test
from nova.tests.scheduler import fakes
-
test_least_cost_opts = [
cfg.FloatOpt('compute_fake_weigher1_weight',
default=2.0,
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index e9a1680a8..62038c722 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -18,8 +18,9 @@
Unit Tests for nova.scheduler.rpcapi
"""
+from oslo.config import cfg
+
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
@@ -90,3 +91,9 @@ class SchedulerRpcAPITestCase(test.TestCase):
def test_get_backdoor_port(self):
self._test_scheduler_api('get_backdoor_port', rpc_method='call',
host='fake_host', version='2.5')
+
+ def test_select_hosts(self):
+ self._test_scheduler_api('select_hosts', rpc_method='call',
+ request_spec='fake_request_spec',
+ filter_properties='fake_prop',
+ version='2.6')
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 01d3f6a50..5d0228c62 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -31,6 +31,7 @@ from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
+from nova.image import glance
from nova.openstack.common import jsonutils
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
@@ -39,6 +40,7 @@ from nova.scheduler import manager
from nova import servicegroup
from nova import test
from nova.tests import fake_instance_actions
+from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.scheduler import fakes
@@ -337,6 +339,22 @@ class SchedulerTestCase(test.TestCase):
def setUp(self):
super(SchedulerTestCase, self).setUp()
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+
+ def fake_show(meh, context, id):
+ if id:
+ return {'id': id, 'min_disk': None, 'min_ram': None,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}}
+ else:
+ raise exception.ImageNotFound(image_id=id)
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.image_service = glance.get_default_image_service()
+
self.driver = self.driver_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
self.topic = 'fake_topic'
@@ -384,7 +402,8 @@ class SchedulerTestCase(test.TestCase):
'ephemeral_gb': 0,
'vm_state': '',
'task_state': '',
- 'instance_type': {'memory_mb': 1024}}
+ 'instance_type': {'memory_mb': 1024},
+ 'image_ref': 'fake-image-ref'}
def test_live_migration_basic(self):
# Test basic schedule_live_migration functionality.
@@ -402,7 +421,8 @@ class SchedulerTestCase(test.TestCase):
instance = jsonutils.to_primitive(self._live_migration_instance())
self.driver._live_migration_src_check(self.context, instance)
- self.driver._live_migration_dest_check(self.context, instance, dest)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest).AndReturn(dest)
self.driver._live_migration_common_check(self.context, instance,
dest)
self.driver.compute_rpcapi.check_can_live_migrate_destination(
@@ -595,17 +615,12 @@ class SchedulerTestCase(test.TestCase):
# Confirms exception raises in case dest and src is same host.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
- self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
- self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
block_migration = False
instance = self._live_migration_instance()
# make dest same as src
dest = instance['host']
self.driver._live_migration_src_check(self.context, instance)
- db.service_get_by_compute_host(self.context,
- dest).AndReturn('fake_service3')
- self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.UnableToMigrateToSelf,
@@ -668,7 +683,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- self.driver._live_migration_dest_check(self.context, instance, dest)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest).AndReturn(dest)
db.service_get_by_compute_host(self.context, dest).AndReturn(
{'compute_node': [{'hypervisor_type': 'xen',
@@ -700,7 +716,8 @@ class SchedulerTestCase(test.TestCase):
instance = self._live_migration_instance()
self.driver._live_migration_src_check(self.context, instance)
- self.driver._live_migration_dest_check(self.context, instance, dest)
+ self.driver._live_migration_dest_check(self.context, instance,
+ dest).AndReturn(dest)
db.service_get_by_compute_host(self.context, dest).AndReturn(
{'compute_node': [{'hypervisor_type': 'xen',
@@ -716,6 +733,94 @@ class SchedulerTestCase(test.TestCase):
block_migration=block_migration,
disk_over_commit=disk_over_commit)
+ def test_live_migration_dest_check_auto_set_host(self):
+ # Confirm dest is picked by scheduler if not set.
+ self.mox.StubOutWithMock(self.driver, 'select_hosts')
+
+ instance = self._live_migration_instance()
+ request_spec = {'instance_properties': instance,
+ 'instance_type': instance['instance_type'],
+ 'instance_uuids': [instance['uuid']],
+ 'image': self.image_service.show(self.context,
+ instance['image_ref'])
+ }
+ ignore_hosts = [instance['host']]
+ filter_properties = {'ignore_hosts': ignore_hosts}
+
+ self.driver.select_hosts(self.context, request_spec,
+ filter_properties).AndReturn(['fake_host2'])
+
+ self.mox.ReplayAll()
+ result = self.driver._live_migration_dest_check(self.context, instance,
+ None, ignore_hosts)
+ self.assertEqual('fake_host2', result)
+
+ def test_live_migration_auto_set_dest(self):
+ # Confirm scheduler picks target host if none given.
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(self.driver, 'select_hosts')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration')
+
+ dest = None
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+ request_spec = {'instance_properties': instance,
+ 'instance_type': instance['instance_type'],
+ 'instance_uuids': [instance['uuid']],
+ 'image': self.image_service.show(self.context,
+ instance['image_ref'])
+ }
+
+ self.driver._live_migration_src_check(self.context, instance)
+
+ # First selected host raises exception.InvalidHypervisorType
+ self.driver.select_hosts(self.context, request_spec,
+ {'ignore_hosts': [instance['host']]}).AndReturn(['fake_host2'])
+ self.driver._live_migration_common_check(self.context, instance,
+ 'fake_host2').AndRaise(exception.InvalidHypervisorType())
+
+ # Second selected host raises exception.InvalidCPUInfo
+ self.driver.select_hosts(self.context, request_spec,
+ {'ignore_hosts': [instance['host'],
+ 'fake_host2']}).AndReturn(['fake_host3'])
+ self.driver._live_migration_common_check(self.context, instance,
+ 'fake_host3')
+ rpc.call(self.context, "compute.fake_host3",
+ {"method": 'check_can_live_migrate_destination',
+ "args": {'instance': instance,
+ 'block_migration': block_migration,
+ 'disk_over_commit': disk_over_commit},
+ "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
+ None).AndRaise(exception.InvalidCPUInfo(reason=""))
+
+ # Third selected host pass all checks
+ self.driver.select_hosts(self.context, request_spec,
+ {'ignore_hosts': [instance['host'],
+ 'fake_host2',
+ 'fake_host3']}).AndReturn(['fake_host4'])
+ self.driver._live_migration_common_check(self.context, instance,
+ 'fake_host4')
+ rpc.call(self.context, "compute.fake_host4",
+ {"method": 'check_can_live_migrate_destination',
+ "args": {'instance': instance,
+ 'block_migration': block_migration,
+ 'disk_over_commit': disk_over_commit},
+ "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
+ None).AndReturn({})
+ self.driver.compute_rpcapi.live_migration(self.context,
+ host=instance['host'], instance=instance, dest='fake_host4',
+ block_migration=block_migration, migrate_data={})
+
+ self.mox.ReplayAll()
+ result = self.driver.schedule_live_migration(self.context,
+ instance=instance, dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+ self.assertEqual(result, None)
+
def test_handle_schedule_error_adds_instance_fault(self):
instance = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
diff --git a/nova/tests/servicegroup/test_mc_servicegroup.py b/nova/tests/servicegroup/test_mc_servicegroup.py
new file mode 100644
index 000000000..255184219
--- /dev/null
+++ b/nova/tests/servicegroup/test_mc_servicegroup.py
@@ -0,0 +1,220 @@
+# Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
+#
+# This is derived from test_db_servicegroup.py.
+# Copyright (c) IBM 2012 Alexey Roytman <roytman at il dot ibm dot com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import eventlet
+import fixtures
+
+from nova import context
+from nova import db
+from nova.openstack.common import timeutils
+from nova import service
+from nova import servicegroup
+from nova import test
+
+
+class ServiceFixture(fixtures.Fixture):
+
+ def __init__(self, host, binary, topic):
+ super(ServiceFixture, self).__init__()
+ self.host = host
+ self.binary = binary
+ self.topic = topic
+ self.serv = None
+
+ def setUp(self):
+ super(ServiceFixture, self).setUp()
+ self.serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.test_service.FakeManager',
+ 1, 1)
+ self.addCleanup(self.serv.kill)
+
+
+class MemcachedServiceGroupTestCase(test.TestCase):
+
+ def setUp(self):
+ super(MemcachedServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ self.flags(servicegroup_driver='mc')
+ self.down_time = 3
+ self.flags(enable_new_services=True)
+ self.flags(service_down_time=self.down_time)
+ self.servicegroup_api = servicegroup.API(test=True)
+ self._host = 'foo'
+ self._binary = 'nova-fake'
+ self._topic = 'unittest'
+ self._ctx = context.get_admin_context()
+
+ def test_memcached_driver(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ hostkey = str("%s:%s" % (self._topic, self._host))
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=self.down_time)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ eventlet.sleep(self.down_time + 1)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ serv.stop()
+ eventlet.sleep(self.down_time + 1)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
+
+ def test_get_all(self):
+ host1 = self._host + '_1'
+ host2 = self._host + '_2'
+ host3 = self._host + '_3'
+
+ serv1 = self.useFixture(
+ ServiceFixture(host1, self._binary, self._topic)).serv
+ serv1.start()
+
+ serv2 = self.useFixture(
+ ServiceFixture(host2, self._binary, self._topic)).serv
+ serv2.start()
+
+ serv3 = self.useFixture(
+ ServiceFixture(host3, self._binary, self._topic)).serv
+ serv3.start()
+
+ service_ref1 = db.service_get_by_args(self._ctx,
+ host1,
+ self._binary)
+ service_ref2 = db.service_get_by_args(self._ctx,
+ host2,
+ self._binary)
+ service_ref3 = db.service_get_by_args(self._ctx,
+ host3,
+ self._binary)
+
+ host1key = str("%s:%s" % (self._topic, host1))
+ host2key = str("%s:%s" % (self._topic, host2))
+ host3key = str("%s:%s" % (self._topic, host3))
+ self.servicegroup_api._driver.mc.set(host1key,
+ timeutils.utcnow(),
+ time=self.down_time)
+ self.servicegroup_api._driver.mc.set(host2key,
+ timeutils.utcnow(),
+ time=self.down_time)
+ self.servicegroup_api._driver.mc.set(host3key,
+ timeutils.utcnow(),
+ time=-1)
+
+ services = self.servicegroup_api.get_all(self._topic)
+
+ self.assertTrue(host1 in services)
+ self.assertTrue(host2 in services)
+ self.assertFalse(host3 in services)
+
+ service_id = self.servicegroup_api.get_one(self._topic)
+ self.assertTrue(service_id in services)
+
+ def test_service_is_up(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ fake_now = 1000
+ down_time = 5
+ self.flags(service_down_time=down_time)
+ self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
+ self.servicegroup_api = servicegroup.API()
+ hostkey = str("%s:%s" % (self._topic, self._host))
+
+ # Up (equal)
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Up
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertFalse(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertFalse(result)
+
+ self.mox.ResetAll()
+
+ def test_report_state(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ self.servicegroup_api = servicegroup.API()
+
+ # updating model_disconnected
+ serv.model_disconnected = True
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertFalse(serv.model_disconnected)
+
+ # handling exception
+ serv.model_disconnected = True
+ self.servicegroup_api._driver.mc = None
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertTrue(serv.model_disconnected)
+
+ delattr(serv, 'model_disconnected')
+ self.servicegroup_api._driver.mc = None
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertTrue(serv.model_disconnected)
diff --git a/nova/tests/servicegroup/test_zk_driver.py b/nova/tests/servicegroup/test_zk_driver.py
index 753153bb5..7330da66a 100644
--- a/nova/tests/servicegroup/test_zk_driver.py
+++ b/nova/tests/servicegroup/test_zk_driver.py
@@ -42,7 +42,7 @@ class ZKServiceGroupTestCase(test.TestCase):
except ImportError:
self.skipTest("Unable to test due to lack of ZooKeeper")
self.flags(servicegroup_driver='zk')
- self.flags(address='localhost:2181', group="zk")
+ self.flags(address='localhost:2181', group="zookeeper")
def test_join_leave(self):
self.servicegroup_api = servicegroup.API()
diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py
index 4192fa08f..85dc1eefd 100644
--- a/nova/tests/test_availability_zones.py
+++ b/nova/tests/test_availability_zones.py
@@ -19,10 +19,11 @@
Tests for availability zones
"""
+from oslo.config import cfg
+
from nova import availability_zones as az
from nova import context
from nova import db
-from nova.openstack.common import cfg
from nova import test
CONF = cfg.CONF
diff --git a/nova/tests/test_baremetal_migrations.conf b/nova/tests/test_baremetal_migrations.conf
new file mode 100644
index 000000000..774f14994
--- /dev/null
+++ b/nova/tests/test_baremetal_migrations.conf
@@ -0,0 +1,9 @@
+[DEFAULT]
+# Set up any number of migration data stores you want; the
+# "name" used in the test is the config variable key.
+#sqlite=sqlite:///test_migrations.db
+sqlite=sqlite://
+#mysql=mysql://root:@localhost/test_migrations
+#postgresql=postgresql://user:pass@localhost/test_migrations
+[walk_style]
+snake_walk=yes
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 835527219..346e0b2b7 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -22,6 +22,7 @@
import datetime
import uuid as stdlib_uuid
+from oslo.config import cfg
from sqlalchemy import MetaData
from sqlalchemy.schema import Table
from sqlalchemy.sql.expression import select
@@ -29,7 +30,6 @@ from sqlalchemy.sql.expression import select
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import timeutils
from nova import test
@@ -334,6 +334,16 @@ class DbApiTestCase(test.TestCase):
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, ctxt, values2)
+ def test_network_update_with_duplicate_vlan(self):
+ ctxt = context.get_admin_context()
+ values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
+ values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
+ network_ref = db.network_create_safe(ctxt, values1)
+ db.network_create_safe(ctxt, values2)
+ self.assertRaises(exception.DuplicateVlan,
+ db.network_update,
+ ctxt, network_ref["id"], values2)
+
def test_instance_update_with_instance_uuid(self):
# test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
@@ -1792,6 +1802,13 @@ class TaskLogTestCase(test.TestCase):
self.end, self.host)
self.assertEqual(result['task_name'], 'fake')
+ def test_task_log_begin_task_duplicate(self):
+ params = (self.context, 'fake', self.begin, self.end, self.host)
+ db.task_log_begin_task(*params, message=self.message)
+ self.assertRaises(exception.TaskAlreadyRunning,
+ db.task_log_begin_task,
+ *params, message=self.message)
+
def test_task_log_end_task(self):
errors = 1
db.task_log_end_task(self.context, self.task_name, self.begin,
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index 025d3a454..5912bb301 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -26,13 +26,14 @@ import shutil
import time
import uuid
+from oslo.config import cfg
+
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import db
from nova.image import glance
-from nova.openstack.common import cfg
from nova import test
from nova.tests import fake_network
from nova.tests.hyperv import db_fakes
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index 87e51819d..871e25dc8 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -15,20 +15,22 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
import os
-from nova.openstack.common import cfg
+import fixtures
+from oslo.config import cfg
+
+from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_libvirt_utils
+from nova.tests import fake_utils
from nova.virt.libvirt import imagebackend
-from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
class _ImageTestCase(object):
- INSTANCES_PATH = '/fake'
+ INSTANCES_PATH = '/instances_path'
def mock_create_image(self, image):
def create_image(fn, base, size, *args, **kwargs):
@@ -39,14 +41,19 @@ class _ImageTestCase(object):
super(_ImageTestCase, self).setUp()
self.flags(disable_process_locking=True,
instances_path=self.INSTANCES_PATH)
- self.INSTANCE = {'name': 'instance'}
+ self.INSTANCE = {'name': 'instance',
+ 'uuid': uuidutils.generate_uuid()}
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
+ self.OLD_STYLE_INSTANCE_PATH = \
+ fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
self.PATH = os.path.join(
- libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
- self.TEMPLATE_DIR = os.path.join(CONF.instances_path,
- '_base')
+ fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
+
+ # TODO(mikal): rename template_dir to base_dir and template_path
+ # to cached_image_path. This will be less confusing.
+ self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
self.useFixture(fixtures.MonkeyPatch(
@@ -55,6 +62,8 @@ class _ImageTestCase(object):
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
@@ -72,6 +81,8 @@ class _ImageTestCase(object):
def test_cache_image_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
@@ -84,6 +95,8 @@ class _ImageTestCase(object):
def test_cache_base_dir_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
@@ -100,6 +113,8 @@ class _ImageTestCase(object):
def test_cache_template_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
@@ -112,6 +127,27 @@ class _ImageTestCase(object):
self.mox.VerifyAll()
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_utils.fake_execute_clear_log()
+ fake_utils.stub_out_utils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+
+ # Call cache() twice to verify the fallocate capability test runs once.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_utils.fake_execute_get_log(),
+ ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
+
class RawTestCase(_ImageTestCase, test.TestCase):
@@ -197,6 +233,11 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase):
def test_create_image_with_size(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE)
@@ -218,6 +259,7 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
super(LvmTestCase, self).setUp()
self.flags(libvirt_images_volume_group=self.VG)
self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
+ self.OLD_STYLE_INSTANCE_PATH = None
self.PATH = os.path.join('/dev', self.VG, self.LV)
self.disk = imagebackend.disk
@@ -342,9 +384,26 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
ephemeral_size=None)
self.mox.VerifyAll()
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_utils.fake_execute_clear_log()
+ fake_utils.stub_out_utils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_utils.fake_execute_get_log(), [])
+
class BackendTestCase(test.TestCase):
- INSTANCE = {'name': 'fake-instance'}
+ INSTANCE = {'name': 'fake-instance',
+ 'uuid': uuidutils.generate_uuid()}
NAME = 'fake-name.suffix'
def get_image(self, use_cow, image_type):
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 611519514..0c5c6d02c 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -20,18 +20,17 @@ import contextlib
import cStringIO
import hashlib
import json
-import logging
import os
import time
-from nova import test
+from oslo.config import cfg
from nova.compute import vm_states
from nova import conductor
from nova import db
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
-from nova.openstack.common import log
+from nova.openstack.common import log as logging
+from nova import test
from nova import utils
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as virtutils
@@ -40,17 +39,17 @@ CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
-LOG = log.getLogger(__name__)
+LOG = logging.getLogger(__name__)
class ImageCacheManagerTestCase(test.TestCase):
def setUp(self):
super(ImageCacheManagerTestCase, self).setUp()
- self.stock_instance_names = {'instance-00000001': '123',
- 'instance-00000002': '456',
- 'instance-00000003': '789',
- 'banana-42-hamster': '444'}
+ self.stock_instance_names = set(['instance-00000001',
+ 'instance-00000002',
+ 'instance-00000003',
+ 'banana-42-hamster'])
def test_read_stored_checksum_missing(self):
self.stubs.Set(os.path, 'exists', lambda x: False)
@@ -181,6 +180,9 @@ class ImageCacheManagerTestCase(test.TestCase):
self.assertTrue(image_cache_manager.used_images['2'] ==
(1, 1, ['inst-2', 'inst-3']))
+ self.assertTrue('inst-1' in image_cache_manager.instance_names)
+ self.assertTrue('123' in image_cache_manager.instance_names)
+
self.assertEqual(len(image_cache_manager.image_popularity), 2)
self.assertEqual(image_cache_manager.image_popularity['1'], 1)
self.assertEqual(image_cache_manager.image_popularity['2'], 2)
@@ -200,7 +202,7 @@ class ImageCacheManagerTestCase(test.TestCase):
self.assertTrue(image_cache_manager.used_images['1'] ==
(1, 0, ['inst-1']))
self.assertTrue(image_cache_manager.instance_names ==
- set(['inst-1', 'inst-1_resize']))
+ set(['inst-1', '123', 'inst-1_resize', '123_resize']))
self.assertEqual(len(image_cache_manager.image_popularity), 1)
self.assertEqual(image_cache_manager.image_popularity['1'], 1)
@@ -338,10 +340,10 @@ class ImageCacheManagerTestCase(test.TestCase):
@contextlib.contextmanager
def _intercept_log_messages(self):
try:
- mylog = log.getLogger('nova')
+ mylog = logging.getLogger('nova')
stream = cStringIO.StringIO()
- handler = logging.StreamHandler(stream)
- handler.setFormatter(log.LegacyFormatter())
+ handler = logging.logging.StreamHandler(stream)
+ handler.setFormatter(logging.LegacyFormatter())
mylog.logger.addHandler(handler)
yield stream
finally:
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index fd90e5fa9..945d627f4 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -27,6 +27,7 @@ import shutil
import tempfile
from lxml import etree
+from oslo.config import cfg
from xml.dom import minidom
from nova.api.ec2 import cloud
@@ -38,11 +39,11 @@ from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests import fake_network
@@ -52,6 +53,7 @@ from nova import utils
from nova import version
from nova.virt.disk import api as disk
from nova.virt import driver
+from nova.virt import event as virtevent
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
@@ -61,7 +63,7 @@ from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
-
+from nova.virt import netutils
try:
import libvirt
@@ -99,7 +101,8 @@ class FakeVirDomainSnapshot(object):
class FakeVirtDomain(object):
- def __init__(self, fake_xml=None):
+ def __init__(self, fake_xml=None, uuidstr=None):
+ self.uuidstr = uuidstr
if fake_xml:
self._fake_dom_xml = fake_xml
else:
@@ -131,11 +134,15 @@ class FakeVirtDomain(object):
def XMLDesc(self, *args):
return self._fake_dom_xml
+ def UUIDString(self):
+ return self.uuidstr
+
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
- self.flags(instances_path='nova.compute.manager')
+
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
# utils.synchronized() will create the lock_path for us if it
# doesn't already exist. It will also delete it when it's done,
@@ -165,19 +172,18 @@ class CacheConcurrencyTestCase(test.TestCase):
fake_libvirt_utils))
def tearDown(self):
- # Make sure the lock_path for this test is cleaned up
- if os.path.exists(self.lock_path):
- shutil.rmtree(self.lock_path)
-
super(CacheConcurrencyTestCase, self).tearDown()
def test_same_fname_concurrency(self):
# Ensures that the same fname cache runs at a sequentially.
+ uuid = uuidutils.generate_uuid()
+
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image({'name': 'instance'},
+ thr1 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
'name').cache,
_concurrency, 'fname', None,
signal=sig1, wait=wait1, done=done1)
@@ -188,7 +194,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image({'name': 'instance'},
+ thr2 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
'name').cache,
_concurrency, 'fname', None,
signal=sig2, wait=wait2, done=done2)
@@ -209,11 +216,14 @@ class CacheConcurrencyTestCase(test.TestCase):
def test_different_fname_concurrency(self):
# Ensures that two different fname caches are concurrent.
+ uuid = uuidutils.generate_uuid()
+
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image({'name': 'instance'},
+ thr1 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
'name').cache,
_concurrency, 'fname2', None,
signal=sig1, wait=wait1, done=done1)
@@ -224,7 +234,8 @@ class CacheConcurrencyTestCase(test.TestCase):
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image({'name': 'instance'},
+ thr2 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
'name').cache,
_concurrency, 'fname1', None,
signal=sig2, wait=wait2, done=done2)
@@ -293,7 +304,8 @@ class LibvirtConnTestCase(test.TestCase):
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
- 'instance_type_id': '5'} # m1.small
+ 'instance_type_id': '5', # m1.small
+ 'extra_specs': {}}
def tearDown(self):
nova.tests.image.fake.FakeImageService_reset()
@@ -338,6 +350,8 @@ class LibvirtConnTestCase(test.TestCase):
initiator = 'fake.initiator.iqn'
ip = 'fakeip'
host = 'fakehost'
+ wwpns = ['100010604b019419']
+ wwnns = ['200010604b019419']
self.flags(my_ip=ip)
self.flags(host=host)
@@ -345,7 +359,9 @@ class LibvirtConnTestCase(test.TestCase):
expected = {
'ip': ip,
'initiator': initiator,
- 'host': host
+ 'host': host,
+ 'wwpns': wwpns,
+ 'wwnns': wwnns
}
volume = {
'id': 'fake'
@@ -1278,7 +1294,7 @@ class LibvirtConnTestCase(test.TestCase):
def convert_image(source, dest, out_format):
libvirt_driver.libvirt_utils.files[dest] = ''
- images.convert_image = convert_image
+ self.stubs.Set(images, 'convert_image', convert_image)
self.mox.ReplayAll()
@@ -1330,7 +1346,7 @@ class LibvirtConnTestCase(test.TestCase):
def convert_image(source, dest, out_format):
libvirt_driver.libvirt_utils.files[dest] = ''
- images.convert_image = convert_image
+ self.stubs.Set(images, 'convert_image', convert_image)
self.mox.ReplayAll()
@@ -1652,10 +1668,7 @@ class LibvirtConnTestCase(test.TestCase):
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEquals(len(interfaces), 2)
- parameters = interfaces[0].findall('./filterref/parameter')
self.assertEquals(interfaces[0].get('type'), 'bridge')
- self.assertEquals(parameters[0].get('name'), 'IP')
- self.assertTrue(_ipv4_like(parameters[0].get('value'), '192.168'))
def _check_xml_and_container(self, instance):
user_context = context.RequestContext(self.user_id,
@@ -1932,12 +1945,8 @@ class LibvirtConnTestCase(test.TestCase):
'type'), 'pty')
check_list.append(check)
- parameter = './devices/interface/filterref/parameter'
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
- (lambda t: t.find(parameter).get('name'), 'IP'),
- (lambda t: _ipv4_like(t.find(parameter).get('value'), '192.168'),
- True),
(lambda t: t.find('./memory').text, '2097152')]
if rescue:
common_checks += [
@@ -1978,6 +1987,14 @@ class LibvirtConnTestCase(test.TestCase):
'%s != %s failed common check %d' %
(check(tree), expected_result, i))
+ filterref = './devices/interface/filterref'
+ (network, mapping) = network_info[0]
+ nic_id = mapping['mac'].replace(':', '')
+ fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
+ instance_filter_name = fw._instance_filter_name(instance_ref,
+ nic_id)
+ self.assertEqual(tree.find(filterref).get('filter'),
+ instance_filter_name)
# This test is supposed to make sure we don't
# override a specifically set uri
#
@@ -2352,8 +2369,8 @@ class LibvirtConnTestCase(test.TestCase):
ret = conn.pre_live_migration(c, inst_ref, vol, nw_info,
migrate_data)
self.assertEqual(ret, None)
- self.assertTrue(os.path.exists('%s/%s/' %
- (tmpdir, inst_ref['name'])))
+ self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
+ inst_ref['uuid'])))
db.instance_destroy(self.context, inst_ref['uuid'])
def test_pre_block_migration_works_correctly(self):
@@ -2389,7 +2406,7 @@ class LibvirtConnTestCase(test.TestCase):
dummyjson)
self.assertTrue(os.path.exists('%s/%s/' %
- (tmpdir, instance_ref['name'])))
+ (tmpdir, instance_ref['uuid'])))
db.instance_destroy(self.context, instance_ref['uuid'])
@@ -2863,21 +2880,29 @@ class LibvirtConnTestCase(test.TestCase):
# NOTE(vish): verifies destroy doesn't raise if the instance disappears
conn._destroy(instance)
- def test_available_least_handles_missing(self):
+ def test_disk_over_committed_size_total(self):
# Ensure destroy calls managedSaveRemove for saved instance.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
- return ['fake']
+ return ['fake1', 'fake2']
self.stubs.Set(conn, 'list_instances', list_instances)
+ fake_disks = {'fake1': [{'type': 'qcow2', 'path': '/somepath/disk1',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/somepath/disk1',
+ 'disk_size':'83886080'}],
+ 'fake2': [{'type': 'raw', 'path': '/somepath/disk2',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/somepath/disk2',
+ 'disk_size':'10737418240'}]}
+
def get_info(instance_name):
- raise exception.InstanceNotFound(instance_id='fake')
+ return jsonutils.dumps(fake_disks.get(instance_name))
self.stubs.Set(conn, 'get_instance_disk_info', get_info)
- result = conn.get_disk_available_least()
- space = fake_libvirt_utils.get_fs_info(CONF.instances_path)['free']
- self.assertEqual(result, space / 1024 ** 3)
+ result = conn.get_disk_over_committed_size_total()
+ self.assertEqual(result, 10653532160)
def test_cpu_info(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -3344,6 +3369,83 @@ class LibvirtConnTestCase(test.TestCase):
got = conn.get_instance_capabilities()
self.assertEqual(want, got)
+ def test_event_dispatch(self):
+ # Validate that the libvirt self-pipe for forwarding
+ # events between threads is working sanely
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ conn.register_event_listener(handler)
+
+ conn._init_events_pipe()
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+ event2 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_PAUSED)
+ conn._queue_event(event1)
+ conn._queue_event(event2)
+ conn._dispatch_events()
+
+ want_events = [event1, event2]
+ self.assertEqual(want_events, got_events)
+
+ event3 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_RESUMED)
+ event4 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ conn._queue_event(event3)
+ conn._queue_event(event4)
+ conn._dispatch_events()
+
+ want_events = [event1, event2, event3, event4]
+ self.assertEqual(want_events, got_events)
+
+ def test_event_lifecycle(self):
+ # Validate that libvirt events are correctly translated
+ # to Nova events
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ conn.register_event_listener(handler)
+ conn._init_events_pipe()
+ fake_dom_xml = """
+ <domain type='kvm'>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ dom = FakeVirtDomain(fake_dom_xml,
+ "cef19ce0-0ca2-11df-855d-b19fbce37686")
+
+ conn._event_lifecycle_callback(conn._conn,
+ dom,
+ libvirt.VIR_DOMAIN_EVENT_STOPPED,
+ 0,
+ conn)
+ conn._dispatch_events()
+ self.assertEqual(len(got_events), 1)
+ self.assertEqual(type(got_events[0]), virtevent.LifecycleEvent)
+ self.assertEqual(got_events[0].uuid,
+ "cef19ce0-0ca2-11df-855d-b19fbce37686")
+ self.assertEqual(got_events[0].transition,
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
class HostStateTestCase(test.TestCase):
@@ -3366,11 +3468,8 @@ class HostStateTestCase(test.TestCase):
def get_cpu_info(self):
return HostStateTestCase.cpu_info
- def get_local_gb_total(self):
- return 100
-
- def get_local_gb_used(self):
- return 20
+ def get_local_gb_info(self):
+ return {'total': 100, 'used': 20, 'free': 80}
def get_memory_mb_total(self):
return 497
@@ -3430,9 +3529,10 @@ class NWFilterFakes:
def filterDefineXMLMock(self, xml):
class FakeNWFilterInternal:
- def __init__(self, parent, name):
+ def __init__(self, parent, name, xml):
self.name = name
self.parent = parent
+ self.xml = xml
def undefine(self):
del self.parent.filters[self.name]
@@ -3440,7 +3540,7 @@ class NWFilterFakes:
tree = etree.fromstring(xml)
name = tree.get('name')
if name not in self.filters:
- self.filters[name] = FakeNWFilterInternal(self, name)
+ self.filters[name] = FakeNWFilterInternal(self, name, xml)
return True
@@ -3963,6 +4063,67 @@ class NWFilterTestCase(test.TestCase):
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
+ def test_nwfilter_parameters(self):
+ admin_ctxt = context.get_admin_context()
+
+ fakefilter = NWFilterFakes()
+ self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+ self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance()
+ inst_id = instance_ref['id']
+ inst_uuid = instance_ref['uuid']
+
+ self.security_group = self.setup_and_return_security_group()
+
+ db.instance_add_security_group(self.context, inst_uuid,
+ self.security_group['id'])
+
+ instance = db.instance_get(self.context, inst_id)
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.fw.setup_basic_filtering(instance, network_info)
+
+ (network, mapping) = network_info[0]
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self.fw._instance_filter_name(instance, nic_id)
+ f = fakefilter.nwfilterLookupByName(instance_filter_name)
+ tree = etree.fromstring(f.xml)
+
+ for fref in tree.findall('filterref'):
+ parameters = fref.findall('./parameter')
+ for parameter in parameters:
+ if parameter.get('name') == 'IP':
+ self.assertTrue(_ipv4_like(parameter.get('value'),
+ '192.168'))
+ elif parameter.get('name') == 'DHCPSERVER':
+ dhcp_server = mapping['dhcp_server']
+ self.assertEqual(parameter.get('value'), dhcp_server)
+ elif parameter.get('name') == 'RASERVER':
+ ra_server = mapping.get('gateway_v6') + "/128"
+ self.assertEqual(parameter.get('value'), ra_server)
+ elif parameter.get('name') == 'PROJNET':
+ ipv4_cidr = network['cidr']
+ net, mask = netutils.get_net_and_mask(ipv4_cidr)
+ self.assertEqual(parameter.get('value'), net)
+ elif parameter.get('name') == 'PROJMASK':
+ ipv4_cidr = network['cidr']
+ net, mask = netutils.get_net_and_mask(ipv4_cidr)
+ self.assertEqual(parameter.get('value'), mask)
+ elif parameter.get('name') == 'PROJNET6':
+ ipv6_cidr = network['cidr_v6']
+ net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
+ self.assertEqual(parameter.get('value'), net)
+ elif parameter.get('name') == 'PROJMASK6':
+ ipv6_cidr = network['cidr_v6']
+ net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
+ self.assertEqual(parameter.get('value'), prefix)
+ else:
+ raise exception.InvalidParameterValue('unknown parameter '
+ 'in filter')
+
+ db.instance_destroy(admin_ctxt, instance_ref['uuid'])
+
class LibvirtUtilsTestCase(test.TestCase):
def test_get_iscsi_initiator(self):
@@ -4163,6 +4324,77 @@ disk size: 4.4M''', ''))
libvirt_utils.fetch_image(context, target, image_id,
user_id, project_id)
+ def test_fetch_raw_image(self):
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ def fake_rename(old, new):
+ self.executes.append(('mv', old, new))
+
+ def fake_unlink(path):
+ self.executes.append(('rm', path))
+
+ def fake_rm_on_errror(path):
+ self.executes.append(('rm', '-f', path))
+
+ def fake_qemu_img_info(path):
+ class FakeImgInfo(object):
+ pass
+
+ file_format = path.split('.')[-1]
+ if file_format == 'part':
+ file_format = path.split('.')[-2]
+ elif file_format == 'converted':
+ file_format = 'raw'
+ if 'backing' in path:
+ backing_file = 'backing'
+ else:
+ backing_file = None
+
+ FakeImgInfo.file_format = file_format
+ FakeImgInfo.backing_file = backing_file
+
+ return FakeImgInfo()
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os, 'rename', fake_rename)
+ self.stubs.Set(os, 'unlink', fake_unlink)
+ self.stubs.Set(images, 'fetch', lambda *_: None)
+ self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
+ self.stubs.Set(utils, 'delete_if_exists', fake_rm_on_errror)
+
+ context = 'opaque context'
+ image_id = '4'
+ user_id = 'fake'
+ project_id = 'fake'
+
+ target = 't.qcow2'
+ self.executes = []
+ expected_commands = [('qemu-img', 'convert', '-O', 'raw',
+ 't.qcow2.part', 't.qcow2.converted'),
+ ('rm', 't.qcow2.part'),
+ ('mv', 't.qcow2.converted', 't.qcow2')]
+ images.fetch_to_raw(context, image_id, target, user_id, project_id)
+ self.assertEqual(self.executes, expected_commands)
+
+ target = 't.raw'
+ self.executes = []
+ expected_commands = [('mv', 't.raw.part', 't.raw')]
+ images.fetch_to_raw(context, image_id, target, user_id, project_id)
+ self.assertEqual(self.executes, expected_commands)
+
+ target = 'backing.qcow2'
+ self.executes = []
+ expected_commands = [('rm', '-f', 'backing.qcow2.part')]
+ self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw,
+ context, image_id, target, user_id, project_id)
+ self.assertEqual(self.executes, expected_commands)
+
+ del self.executes
+
def test_get_disk_backing_file(self):
with_actual_path = False
@@ -4266,11 +4498,11 @@ class LibvirtDriverTestCase(test.TestCase):
disk_info = [{'type': 'qcow2', 'path': '/test/disk',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk',
- 'disk_size':'83886080'},
+ 'disk_size': '83886080'},
{'type': 'raw', 'path': '/test/disk.local',
'virt_disk_size': '10737418240',
'backing_file': '/base/disk.local',
- 'disk_size':'83886080'}]
+ 'disk_size': '83886080'}]
disk_info_text = jsonutils.dumps(disk_info)
def fake_get_instance_disk_info(instance, xml=None):
diff --git a/nova/tests/test_libvirt_blockinfo.py b/nova/tests/test_libvirt_blockinfo.py
index 68f1fa394..5560e63fd 100644
--- a/nova/tests/test_libvirt_blockinfo.py
+++ b/nova/tests/test_libvirt_blockinfo.py
@@ -178,6 +178,21 @@ class LibvirtBlockInfoTest(test.TestCase):
}
self.assertEqual(mapping, expect)
+ def test_get_disk_mapping_lxc(self):
+ # A simple disk mapping setup, but for lxc
+
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
+ "lxc", "lxc",
+ None)
+ expect = {
+ 'disk': {'bus': 'lxc', 'dev': None, 'type': 'disk'},
+ 'root': {'bus': 'lxc', 'dev': None, 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
def test_get_disk_mapping_simple_iso(self):
# A simple disk mapping setup, but with a ISO for root device
diff --git a/nova/tests/test_libvirt_config.py b/nova/tests/test_libvirt_config.py
index 56719de11..f98e6dd51 100644
--- a/nova/tests/test_libvirt_config.py
+++ b/nova/tests/test_libvirt_config.py
@@ -473,6 +473,34 @@ class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
<target bus="virtio" dev="/dev/vda"/>
</disk>""")
+ def test_config_iotune(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.disk_read_bytes_sec = 1024000
+ obj.disk_read_iops_sec = 1000
+ obj.disk_total_bytes_sec = 2048000
+ obj.disk_write_bytes_sec = 1024000
+ obj.disk_write_iops_sec = 1000
+ obj.disk_total_iops_sec = 2000
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <iotune>
+ <read_bytes_sec>1024000</read_bytes_sec>
+ <read_iops_sec>1000</read_iops_sec>
+ <write_bytes_sec>1024000</write_bytes_sec>
+ <write_iops_sec>1000</write_iops_sec>
+ <total_bytes_sec>2048000</total_bytes_sec>
+ <total_iops_sec>2000</total_iops_sec>
+ </iotune>
+ </disk>""")
+
class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):
@@ -570,6 +598,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.model = "virtio"
obj.target_dev = "vnet0"
obj.driver_name = "vhost"
+ obj.vif_inbound_average = 1024000
+ obj.vif_inbound_peak = 10240000
+ obj.vif_inbound_burst = 1024000
+ obj.vif_outbound_average = 1024000
+ obj.vif_outbound_peak = 10240000
+ obj.vif_outbound_burst = 1024000
xml = obj.to_xml()
self.assertXmlEqual(xml, """
@@ -578,6 +612,10 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<model type="virtio"/>
<driver name="vhost"/>
<target dev="vnet0"/>
+ <bandwidth>
+ <inbound average="1024000" peak="10240000" burst="1024000"/>
+ <outbound average="1024000" peak="10240000" burst="1024000"/>
+ </bandwidth>
</interface>""")
def test_config_bridge(self):
@@ -589,6 +627,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
obj.target_dev = "tap12345678"
obj.filtername = "clean-traffic"
obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
+ obj.vif_inbound_average = 1024000
+ obj.vif_inbound_peak = 10240000
+ obj.vif_inbound_burst = 1024000
+ obj.vif_outbound_average = 1024000
+ obj.vif_outbound_peak = 10240000
+ obj.vif_outbound_burst = 1024000
xml = obj.to_xml()
self.assertXmlEqual(xml, """
@@ -600,6 +644,10 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<filterref filter="clean-traffic">
<parameter name="IP" value="192.168.122.1"/>
</filterref>
+ <bandwidth>
+ <inbound average="1024000" peak="10240000" burst="1024000"/>
+ <outbound average="1024000" peak="10240000" burst="1024000"/>
+ </bandwidth>
</interface>""")
def test_config_bridge_ovs(self):
@@ -776,6 +824,9 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
obj.virt_type = "kvm"
obj.memory = 1024 * 1024 * 100
obj.vcpus = 2
+ obj.cpu_shares = 100
+ obj.cpu_quota = 50000
+ obj.cpu_period = 25000
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "linux"
@@ -820,6 +871,11 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
<acpi/>
<apic/>
</features>
+ <cputune>
+ <shares>100</shares>
+ <quota>50000</quota>
+ <period>25000</period>
+ </cputune>
<devices>
<disk type="file" device="disk">
<source file="/tmp/img"/>
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index 749fda33a..cb7943ea8 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -15,10 +15,10 @@
# under the License.
from lxml import etree
+from oslo.config import cfg
from nova import exception
from nova.network import model as network_model
-from nova.openstack.common import cfg
from nova import test
from nova.tests import fakelibvirt
from nova import utils
diff --git a/nova/tests/test_libvirt_volume.py b/nova/tests/test_libvirt_volume.py
index 0098215b2..28d0c8088 100644
--- a/nova/tests/test_libvirt_volume.py
+++ b/nova/tests/test_libvirt_volume.py
@@ -15,12 +15,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+import fixtures
import os
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
+from nova import exception
+from nova.storage import linuxscsi
from nova import test
+from nova.tests import fake_libvirt_utils
from nova import utils
from nova.virt import fake
+from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import volume
CONF = cfg.CONF
@@ -109,6 +115,7 @@ class LibvirtVolumeTestCase(test.TestCase):
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location),
+ ('iscsiadm', '-m', 'session'),
('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location, '--login'),
('iscsiadm', '-m', 'node', '-T', iqn,
@@ -147,6 +154,7 @@ class LibvirtVolumeTestCase(test.TestCase):
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location),
+ ('iscsiadm', '-m', 'session'),
('iscsiadm', '-m', 'node', '-T', iqn,
'-p', location, '--login'),
('iscsiadm', '-m', 'node', '-T', iqn,
@@ -336,6 +344,51 @@ class LibvirtVolumeTestCase(test.TestCase):
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
libvirt_driver.disconnect_volume(connection_info, "vde")
+ def test_libvirt_kvm_volume(self):
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.openstack:%s' % name
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), dev_str)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_volume_with_multipath(self):
+ self.flags(libvirt_iscsi_use_multipath=True)
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
+ self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.openstack:%s' % name
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ connection_info['data']['device_path'] = mpdev_filepath
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ target_portals = ['fake_portal1', 'fake_portal2']
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
def test_libvirt_nfs_driver(self):
# NOTE(vish) exists is to make driver assume connecting worked
mnt_base = '/mnt'
@@ -420,3 +473,118 @@ class LibvirtVolumeTestCase(test.TestCase):
('stat', export_mnt_base),
('mount', '-t', 'glusterfs', export_string, export_mnt_base)]
self.assertEqual(self.executes, expected_commands)
+
+ def fibrechan_connection(self, volume, location, wwn):
+ return {
+ 'driver_volume_type': 'fibrechan',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_wwn': wwn,
+ 'target_lun': 1,
+ }
+ }
+
+ def test_libvirt_fibrechan_driver(self):
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas',
+ fake_libvirt_utils.get_fc_hbas)
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas_info',
+ fake_libvirt_utils.get_fc_hbas_info)
+ # NOTE(vish) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdb')
+ libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
+ multipath_devname = '/dev/md-1'
+ devices = {"device": multipath_devname,
+ "devices": [{'device': '/dev/sdb',
+ 'address': '1:0:0:1',
+ 'host': 1, 'channel': 0,
+ 'id': 0, 'lun': 1}]}
+ self.stubs.Set(linuxscsi, 'find_multipath_device', lambda x: devices)
+ self.stubs.Set(linuxscsi, 'remove_device', lambda x: None)
+ location = '10.0.2.15:3260'
+ name = 'volume-00000001'
+ wwn = '1234567890123456'
+ vol = {'id': 1, 'name': name}
+ connection_info = self.fibrechan_connection(vol, location, wwn)
+ mount_device = "vde"
+ disk_info = {
+ "bus": "virtio",
+ "dev": mount_device,
+ "type": "disk"
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ dev_str = '/dev/disk/by-path/pci-0000:05:00.2-fc-0x%s-lun-1' % wwn
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), multipath_devname)
+ connection_info["data"]["devices"] = devices["devices"]
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+ expected_commands = []
+ self.assertEqual(self.executes, expected_commands)
+
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas',
+ lambda: [])
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas_info',
+ lambda: [])
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.connect_volume,
+ connection_info, disk_info)
+
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas', lambda: [])
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas_info', lambda: [])
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.connect_volume,
+ connection_info, disk_info)
+
+ def test_libvirt_fibrechan_getpci_num(self):
+ libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
+ hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
+ "/0000:05:00.3/host2/fc_host/host2"}
+ pci_num = libvirt_driver._get_pci_num(hba)
+ self.assertEqual("0000:05:00.3", pci_num)
+
+ hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
+ "/0000:05:00.3/0000:06:00.6/host2/fc_host/host2"}
+ pci_num = libvirt_driver._get_pci_num(hba)
+ self.assertEqual("0000:06:00.6", pci_num)
+
+ def test_libvirt_scality_driver(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ TEST_MOUNT = os.path.join(tempdir, 'fake_mount')
+ TEST_CONFIG = os.path.join(tempdir, 'fake_config')
+ TEST_VOLDIR = 'volumes'
+ TEST_VOLNAME = 'volume_name'
+ TEST_CONN_INFO = {
+ 'data': {
+ 'sofs_path': os.path.join(TEST_VOLDIR, TEST_VOLNAME)
+ }
+ }
+ TEST_VOLPATH = os.path.join(TEST_MOUNT,
+ TEST_VOLDIR,
+ TEST_VOLNAME)
+ TEST_DISK_INFO = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+
+ open(TEST_CONFIG, "w+").close()
+ os.makedirs(os.path.join(TEST_MOUNT, 'sys'))
+
+ def _access_wrapper(path, flags):
+ if path == '/sbin/mount.sofs':
+ return True
+ else:
+ return os.access(path, flags)
+
+ self.stubs.Set(os, 'access', _access_wrapper)
+
+ self.flags(scality_sofs_config=TEST_CONFIG,
+ scality_sofs_mount_point=TEST_MOUNT)
+ driver = volume.LibvirtScalityVolumeDriver(self.fake_conn)
+ conf = driver.connect_volume(TEST_CONN_INFO, TEST_DISK_INFO)
+
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'file')
+ self.assertEqual(tree.find('./source').get('file'), TEST_VOLPATH)
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 95399e33d..3d01f1336 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -25,6 +25,7 @@ import hmac
import json
import re
+from oslo.config import cfg
import webob
from nova.api.metadata import base
@@ -36,7 +37,6 @@ from nova import db
from nova.db.sqlalchemy import api
from nova import exception
from nova.network import api as network_api
-from nova.openstack.common import cfg
from nova import test
from nova.tests import fake_network
diff --git a/nova/tests/test_migration_utils.py b/nova/tests/test_migration_utils.py
index 45b6d86d4..ddaaa2552 100644
--- a/nova/tests/test_migration_utils.py
+++ b/nova/tests/test_migration_utils.py
@@ -16,7 +16,9 @@
# under the License.
from migrate.changeset import UniqueConstraint
-from sqlalchemy import MetaData, Table, Column, Integer, BigInteger
+from sqlalchemy import Integer, BigInteger, DateTime, String
+from sqlalchemy import MetaData, Table, Column
+from sqlalchemy.sql import select
from nova.db.sqlalchemy import utils
from nova import exception
@@ -124,3 +126,102 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
self.assertEqual(len(constraints), 0)
self.assertEqual(len(test_table.constraints), 1)
test_table.drop()
+
+ def _populate_db_for_drop_duplicate_entries(self, engine, meta,
+ table_name):
+ values = [
+ {'id': 11, 'a': 3, 'b': 10, 'c': 'abcdef'},
+ {'id': 12, 'a': 5, 'b': 10, 'c': 'abcdef'},
+ {'id': 13, 'a': 6, 'b': 10, 'c': 'abcdef'},
+ {'id': 14, 'a': 7, 'b': 10, 'c': 'abcdef'},
+ {'id': 21, 'a': 1, 'b': 20, 'c': 'aa'},
+ {'id': 31, 'a': 1, 'b': 20, 'c': 'bb'},
+ {'id': 41, 'a': 1, 'b': 30, 'c': 'aef'},
+ {'id': 42, 'a': 2, 'b': 30, 'c': 'aef'},
+ {'id': 43, 'a': 3, 'b': 30, 'c': 'aef'}
+ ]
+
+ test_table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True,
+ nullable=False),
+ Column('a', Integer),
+ Column('b', Integer),
+ Column('c', String),
+ Column('deleted', Integer, default=0),
+ Column('deleted_at', DateTime),
+ Column('updated_at', DateTime))
+
+ test_table.create()
+ engine.execute(test_table.insert(), values)
+ return test_table, values
+
+ def test_drop_old_duplicate_entries_from_table(self):
+ table_name = "__test_tmp_table__"
+
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ test_table, values = self.\
+ _populate_db_for_drop_duplicate_entries(engine, meta,
+ table_name)
+
+ utils.drop_old_duplicate_entries_from_table(engine, table_name,
+ False, 'b', 'c')
+
+ uniq_values = set()
+ expected_ids = []
+ for value in sorted(values, key=lambda x: x['id'], reverse=True):
+ uniq_value = (('b', value['b']), ('c', value['c']))
+ if uniq_value in uniq_values:
+ continue
+ uniq_values.add(uniq_value)
+ expected_ids.append(value['id'])
+
+ real_ids = [row[0] for row in
+ engine.execute(select([test_table.c.id])).fetchall()]
+
+ self.assertEqual(len(real_ids), len(expected_ids))
+ for id_ in expected_ids:
+ self.assertTrue(id_ in real_ids)
+
+ def test_drop_old_duplicate_entries_from_table_soft_delete(self):
+ table_name = "__test_tmp_table__"
+
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ table, values = self.\
+ _populate_db_for_drop_duplicate_entries(engine, meta,
+ table_name)
+ utils.drop_old_duplicate_entries_from_table(engine, table_name,
+ True, 'b', 'c')
+ uniq_values = set()
+ expected_values = []
+ soft_deleted_values = []
+
+ for value in sorted(values, key=lambda x: x['id'], reverse=True):
+ uniq_value = (('b', value['b']), ('c', value['c']))
+ if uniq_value in uniq_values:
+ soft_deleted_values.append(value)
+ continue
+ uniq_values.add(uniq_value)
+ expected_values.append(value)
+
+ base_select = table.select()
+
+ rows_select = base_select.\
+ where(table.c.deleted != table.c.id)
+ row_ids = [row['id'] for row in
+ engine.execute(rows_select).fetchall()]
+ self.assertEqual(len(row_ids), len(expected_values))
+ for value in expected_values:
+ self.assertTrue(value['id'] in row_ids)
+
+ deleted_rows_select = base_select.\
+ where(table.c.deleted == table.c.id)
+ deleted_rows_ids = [row['id'] for row in
+ engine.execute(deleted_rows_select).fetchall()]
+ self.assertEqual(len(deleted_rows_ids),
+ len(values) - len(row_ids))
+ for value in soft_deleted_values:
+ self.assertTrue(value['id'] in deleted_rows_ids)
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index e71b97513..f800d2eb7 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -44,31 +44,40 @@ import collections
import commands
import ConfigParser
import datetime
+import netaddr
import os
import sqlalchemy
+import sqlalchemy.exc
import urlparse
+import uuid
from migrate.versioning import repository
-import nova.db.migration as migration
import nova.db.sqlalchemy.migrate_repo
-from nova.db.sqlalchemy.migration import versioning_api as migration_api
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
+import nova.virt.baremetal.db.sqlalchemy.migrate_repo
LOG = logging.getLogger(__name__)
def _get_connect_string(backend,
- user="openstack_citest",
- passwd="openstack_citest",
- database="openstack_citest"):
+ user=None,
+ passwd=None,
+ database=None):
"""
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
+ if not user:
+ user = "openstack_citest"
+ if not passwd:
+ passwd = "openstack_citest"
+ if not database:
+ database = "openstack_citest"
+
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
@@ -108,6 +117,13 @@ def _have_mysql():
return present.lower() in ('', 'true')
+def _have_postgresql():
+ present = os.environ.get('NOVA_TEST_POSTGRESQL_PRESENT')
+ if present is None:
+ return _is_backend_avail('postgres')
+ return present.lower() in ('', 'true')
+
+
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
@@ -118,32 +134,66 @@ def get_table(engine, name):
return sqlalchemy.Table(name, metadata, autoload=True)
+def get_mysql_connection_info(conn_pieces):
+ database = conn_pieces.path.strip('/')
+ loc_pieces = conn_pieces.netloc.split('@')
+ host = loc_pieces[1]
+ auth_pieces = loc_pieces[0].split(':')
+ user = auth_pieces[0]
+ password = ""
+ if len(auth_pieces) > 1:
+ if auth_pieces[1].strip():
+ password = "-p\"%s\"" % auth_pieces[1]
+
+ return (user, password, database, host)
+
+
+def get_pgsql_connection_info(conn_pieces):
+ database = conn_pieces.path.strip('/')
+ loc_pieces = conn_pieces.netloc.split('@')
+ host = loc_pieces[1]
+
+ auth_pieces = loc_pieces[0].split(':')
+ user = auth_pieces[0]
+ password = ""
+ if len(auth_pieces) > 1:
+ password = auth_pieces[1].strip()
+
+ return (user, password, database, host)
+
+
class BaseMigrationTestCase(test.TestCase):
    """Base class for testing migrations and migration utils."""
- DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
- 'test_migrations.conf')
- # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
- # to override the location of the config file for migration testing
- CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
- DEFAULT_CONFIG_FILE)
- MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
- REPOSITORY = repository.Repository(
- os.path.abspath(os.path.dirname(MIGRATE_FILE)))
+ def __init__(self, *args, **kwargs):
+ super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
- def setUp(self):
- super(BaseMigrationTestCase, self).setUp()
+ self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+ 'test_migrations.conf')
+ # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
+ # to override the location of the config file for migration testing
+ self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
+ self.DEFAULT_CONFIG_FILE)
+ self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
+ self.REPOSITORY = repository.Repository(
+ os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+ self.INIT_VERSION = 0
self.snake_walk = False
self.test_databases = {}
+ self.migration = None
+ self.migration_api = None
+
+ def setUp(self):
+ super(BaseMigrationTestCase, self).setUp()
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
- LOG.debug('config_path is %s' % BaseMigrationTestCase.CONFIG_FILE_PATH)
- if os.path.exists(BaseMigrationTestCase.CONFIG_FILE_PATH):
+ LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
+ if os.path.exists(self.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
- cp.read(BaseMigrationTestCase.CONFIG_FILE_PATH)
+ cp.read(self.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
@@ -190,34 +240,20 @@ class BaseMigrationTestCase(test.TestCase):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
- database = conn_pieces.path.strip('/')
- loc_pieces = conn_pieces.netloc.split('@')
- host = loc_pieces[1]
- auth_pieces = loc_pieces[0].split(':')
- user = auth_pieces[0]
- password = ""
- if len(auth_pieces) > 1:
- if auth_pieces[1].strip():
- password = "-p\"%s\"" % auth_pieces[1]
+ (user, password, database, host) = \
+ get_mysql_connection_info(conn_pieces)
sql = ("drop database if exists %(database)s; "
- "create database %(database)s;") % locals()
+ "create database %(database)s;") % locals()
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
"-e \"%(sql)s\"") % locals()
execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
- database = conn_pieces.path.strip('/')
- loc_pieces = conn_pieces.netloc.split('@')
- host = loc_pieces[1]
-
- auth_pieces = loc_pieces[0].split(':')
- user = auth_pieces[0]
- password = ""
- if len(auth_pieces) > 1:
- password = auth_pieces[1].strip()
# note(krtaylor): File creation problems with tests in
# venv using .pgpass authentication, changed to
# PGPASSWORD environment variable which is no longer
# planned to be deprecated
+ (user, password, database, host) = \
+ get_pgsql_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
@@ -234,18 +270,6 @@ class BaseMigrationTestCase(test.TestCase):
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
-
-class TestMigrations(BaseMigrationTestCase):
- """Test sqlalchemy-migrate migrations."""
-
- def test_walk_versions(self):
- """
- Walks all version scripts for each tested database, ensuring
- that there are no errors in the version scripts for each engine
- """
- for key, engine in self.engines.items():
- self._walk_versions(engine, self.snake_walk)
-
def test_mysql_connect_fail(self):
"""
Test that we can trigger a mysql connection failure and we fail
@@ -254,16 +278,18 @@ class TestMigrations(BaseMigrationTestCase):
if _is_backend_avail('mysql', user="openstack_cifail"):
self.fail("Shouldn't have connected")
- def test_mysql_opportunistically(self):
+ def _test_mysql_opportunistically(self, database=None):
# Test that table creation on mysql only builds InnoDB tables
- if not _is_backend_avail('mysql'):
+ if not _have_mysql():
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
- connect_string = _get_connect_string("mysql")
+ connect_string = _get_connect_string("mysql", database=database)
+ (user, password, database, host) = \
+ get_mysql_connection_info(urlparse.urlparse(connect_string))
engine = sqlalchemy.create_engine(connect_string)
- self.engines["mysqlcitest"] = engine
- self.test_databases["mysqlcitest"] = connect_string
+ self.engines[database] = engine
+ self.test_databases[database] = connect_string
# build a fully populated mysql database with all the tables
self._reset_databases()
@@ -273,14 +299,16 @@ class TestMigrations(BaseMigrationTestCase):
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
- "where TABLE_SCHEMA='openstack_citest'")
+ "where TABLE_SCHEMA='%(database)s'" %
+ locals())
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
noninnodb = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
- "where TABLE_SCHEMA='openstack_citest' "
+ "where TABLE_SCHEMA='%(database)s' "
"and ENGINE!='InnoDB' "
- "and TABLE_NAME!='migrate_version'")
+ "and TABLE_NAME!='migrate_version'" %
+ locals())
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
connection.close()
@@ -293,16 +321,18 @@ class TestMigrations(BaseMigrationTestCase):
if _is_backend_avail('postgresql', user="openstack_cifail"):
self.fail("Shouldn't have connected")
- def test_postgresql_opportunistically(self):
+ def _test_postgresql_opportunistically(self, database=None):
# Test postgresql database migration walk
- if not _is_backend_avail('postgres'):
+ if not _have_postgresql():
self.skipTest("postgresql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
- connect_string = _get_connect_string("postgres")
+ connect_string = _get_connect_string("postgres", database=database)
engine = sqlalchemy.create_engine(connect_string)
- self.engines["postgresqlcitest"] = engine
- self.test_databases["postgresqlcitest"] = connect_string
+ (user, password, database, host) = \
+ get_pgsql_connection_info(urlparse.urlparse(connect_string))
+ self.engines[database] = engine
+ self.test_databases[database] = connect_string
# build a fully populated postgresql database with all the tables
self._reset_databases()
@@ -315,19 +345,21 @@ class TestMigrations(BaseMigrationTestCase):
# upgrades successfully.
# Place the database under version control
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- self.assertEqual(migration.INIT_VERSION,
- migration_api.db_version(engine,
- TestMigrations.REPOSITORY))
+ self.migration_api.version_control(engine,
+ self.REPOSITORY,
+ self.INIT_VERSION)
+ self.assertEqual(self.INIT_VERSION,
+ self.migration_api.db_version(engine,
+ self.REPOSITORY))
- migration_api.upgrade(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION + 1)
+ self.migration_api.upgrade(engine,
+ self.REPOSITORY,
+ self.INIT_VERSION + 1)
- LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
+ LOG.debug('latest version is %s' % self.REPOSITORY.latest)
- for version in xrange(migration.INIT_VERSION + 2,
- TestMigrations.REPOSITORY.latest + 1):
+ for version in xrange(self.INIT_VERSION + 2,
+ self.REPOSITORY.latest + 1):
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
@@ -338,8 +370,8 @@ class TestMigrations(BaseMigrationTestCase):
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(
- xrange(migration.INIT_VERSION + 2,
- TestMigrations.REPOSITORY.latest + 1)):
+ xrange(self.INIT_VERSION + 2,
+ self.REPOSITORY.latest + 1)):
# downgrade -> upgrade -> downgrade
self._migrate_down(engine, version)
if snake_walk:
@@ -347,12 +379,12 @@ class TestMigrations(BaseMigrationTestCase):
self._migrate_down(engine, version)
def _migrate_down(self, engine, version):
- migration_api.downgrade(engine,
- TestMigrations.REPOSITORY,
+ self.migration_api.downgrade(engine,
+ self.REPOSITORY,
version)
self.assertEqual(version,
- migration_api.db_version(engine,
- TestMigrations.REPOSITORY))
+ self.migration_api.db_version(engine,
+ self.REPOSITORY))
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
@@ -370,13 +402,13 @@ class TestMigrations(BaseMigrationTestCase):
if prerun:
data = prerun(engine)
- migration_api.upgrade(engine,
- TestMigrations.REPOSITORY,
- version)
+ self.migration_api.upgrade(engine,
+ self.REPOSITORY,
+ version)
self.assertEqual(
version,
- migration_api.db_version(engine,
- TestMigrations.REPOSITORY))
+ self.migration_api.db_version(engine,
+ self.REPOSITORY))
if with_data:
check = getattr(self, "_check_%d" % version, None)
@@ -387,6 +419,50 @@ class TestMigrations(BaseMigrationTestCase):
(version, engine))
raise
+
+class TestNovaMigrations(BaseMigrationTestCase):
+ """Test sqlalchemy-migrate migrations."""
+
+ def __init__(self, *args, **kwargs):
+ super(TestNovaMigrations, self).__init__(*args, **kwargs)
+
+ self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+ 'test_migrations.conf')
+ # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
+ # to override the location of the config file for migration testing
+ self.CONFIG_FILE_PATH = os.environ.get('NOVA_TEST_MIGRATIONS_CONF',
+ self.DEFAULT_CONFIG_FILE)
+ self.MIGRATE_FILE = nova.db.sqlalchemy.migrate_repo.__file__
+ self.REPOSITORY = repository.Repository(
+ os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+
+ def setUp(self):
+ super(TestNovaMigrations, self).setUp()
+
+ if self.migration is None:
+ self.migration = __import__('nova.db.migration',
+ globals(), locals(), ['INIT_VERSION'], -1)
+ self.INIT_VERSION = self.migration.INIT_VERSION
+ if self.migration_api is None:
+ temp = __import__('nova.db.sqlalchemy.migration',
+ globals(), locals(), ['versioning_api'], -1)
+ self.migration_api = temp.versioning_api
+
+ def tearDown(self):
+ super(TestNovaMigrations, self).tearDown()
+
+ def test_walk_versions(self):
+ for key, engine in self.engines.items():
+ self._walk_versions(engine, self.snake_walk)
+
+ def test_mysql_opportunistically(self):
+ self._test_mysql_opportunistically(
+ database='openstack_citest')
+
+ def test_postgresql_opportunistically(self):
+ self._test_postgresql_opportunistically(
+ database='openstack_citest')
+
def _prerun_134(self, engine):
now = timeutils.utcnow()
data = [{
@@ -440,6 +516,24 @@ class TestMigrations(BaseMigrationTestCase):
self.assertEqual(data[0]['mac'], bw['mac'])
+ # migration 141, update migrations instance uuid
+ def _prerun_141(self, engine):
+ data = {
+ 'instance_uuid': str(uuid.uuid4())
+ }
+ migrations = get_table(engine, 'migrations')
+ engine.execute(migrations.insert(), data)
+ result = migrations.insert().values(data).execute()
+ data['id'] = result.inserted_primary_key[0]
+ return data
+
+ def _check_141(self, engine, data):
+ migrations = get_table(engine, 'migrations')
+ row = migrations.select(
+ migrations.c.id == data['id']).execute().first()
+ # Check that change to String(36) went alright
+ self.assertEqual(data['instance_uuid'], row['instance_uuid'])
+
# migration 146, availability zone transition
def _prerun_146(self, engine):
data = {
@@ -507,27 +601,60 @@ class TestMigrations(BaseMigrationTestCase):
# migration 149, changes IPAddr storage format
def _prerun_149(self, engine):
provider_fw_rules = get_table(engine, 'provider_fw_rules')
- data = [
- {'protocol': 'tcp', 'from_port': 1234,
- 'to_port': 1234, 'cidr': "127.0.0.1"},
- {'protocol': 'tcp', 'from_port': 1234,
- 'to_port': 1234, 'cidr': "255.255.255.255"},
- {'protocol': 'tcp', 'from_port': 1234,
- 'to_port': 1234, 'cidr': "2001:db8::1:2"},
- {'protocol': 'tcp', 'from_port': 1234,
- 'to_port': 1234, 'cidr': "::1"}
- ]
- engine.execute(provider_fw_rules.insert(), data)
+ console_pools = get_table(engine, 'console_pools')
+ data = {
+ 'provider_fw_rules':
+ [
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "127.0.0.1/30"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "128.128.128.128/16"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "128.128.128.128/32"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "2001:db8::1:2/48"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "::1/64"},
+ {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234,
+ 'cidr': "0000:0000:0000:2013:0000:6535:abcd:ef11/64"},
+ {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234,
+ 'cidr': "0000:1020:0000:2013:0000:6535:abcd:ef11/128"},
+ ],
+ 'console_pools':
+ [
+ {'address': '10.10.10.10'},
+ {'address': '128.100.100.100'},
+ {'address': '2002:2002:2002:2002:2002:2002:2002:2002'},
+ {'address': '::1'},
+ {'address': '0000:0000:0000:2013:0000:6535:abcd:ef11'}
+ ]
+ }
+
+ engine.execute(provider_fw_rules.insert(), data['provider_fw_rules'])
+
+ for pool in data['console_pools']:
+ engine.execute(console_pools.insert(), pool)
+
return data
def _check_149(self, engine, data):
provider_fw_rules = get_table(engine, 'provider_fw_rules')
result = provider_fw_rules.select().execute()
- iplist = map(lambda x: x['cidr'], data)
+ iplist = map(lambda x: str(netaddr.IPNetwork(x['cidr'])),
+ data['provider_fw_rules'])
+
+ for row in result:
+ self.assertIn(str(netaddr.IPNetwork(row['cidr'])), iplist)
+
+ console_pools = get_table(engine, 'console_pools')
+ result = console_pools.select().execute()
+
+ iplist = map(lambda x: str(netaddr.IPAddress(x['address'])),
+ data['console_pools'])
for row in result:
- self.assertIn(row['cidr'], iplist)
+ self.assertIn(str(netaddr.IPAddress(row['address'])), iplist)
# migration 151 - changes period_beginning and period_ending to DateTime
def _prerun_151(self, engine):
@@ -703,3 +830,116 @@ class TestMigrations(BaseMigrationTestCase):
# override __eq__, but if we stringify them then they do.
self.assertEqual(str(base_column.type),
str(shadow_column.type))
+
+ # migration 156 - introduce CIDR type
+ def _prerun_156(self, engine):
+ # assume the same data as from 149
+ data = {
+ 'provider_fw_rules':
+ [
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "127.0.0.1/30"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "128.128.128.128/16"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "128.128.128.128/32"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "2001:db8::1:2/48"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "::1/64"},
+ {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234,
+ 'cidr': "0000:0000:0000:2013:0000:6535:abcd:ef11/64"},
+ {'protocol': 'tcp', 'from_port': 1234, 'to_port': 1234,
+ 'cidr': "0000:1020:0000:2013:0000:6535:abcd:ef11/128"},
+ ],
+ 'console_pools':
+ [
+ {'address': '10.10.10.10'},
+ {'address': '128.100.100.100'},
+ {'address': '2002:2002:2002:2002:2002:2002:2002:2002'},
+ {'address': '::1'},
+ {'address': '0000:0000:0000:2013:0000:6535:abcd:ef11'}
+ ]
+ }
+ return data
+
+ def _check_156(self, engine, data):
+ # recheck the 149 data
+ self._check_149(engine, data)
+
+ def _prerun_158(self, engine):
+ networks = get_table(engine, 'networks')
+ data = [
+ {'vlan': 1, 'deleted': 0},
+ {'vlan': 1, 'deleted': 0},
+ {'vlan': 1, 'deleted': 0},
+ ]
+
+ for item in data:
+ networks.insert().values(item).execute()
+ return data
+
+ def _check_158(self, engine, data):
+ networks = get_table(engine, 'networks')
+ rows = networks.select().\
+ where(networks.c.deleted != networks.c.id).\
+ execute().\
+ fetchall()
+ self.assertEqual(len(rows), 1)
+
+
+class TestBaremetalMigrations(BaseMigrationTestCase):
+ """Test sqlalchemy-migrate migrations."""
+
+ def __init__(self, *args, **kwargs):
+ super(TestBaremetalMigrations, self).__init__(*args, **kwargs)
+
+ self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
+ 'test_baremetal_migrations.conf')
+ # Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
+ # to override the location of the config file for migration testing
+ self.CONFIG_FILE_PATH = os.environ.get(
+ 'BAREMETAL_TEST_MIGRATIONS_CONF',
+ self.DEFAULT_CONFIG_FILE)
+ self.MIGRATE_FILE = \
+ nova.virt.baremetal.db.sqlalchemy.migrate_repo.__file__
+ self.REPOSITORY = repository.Repository(
+ os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
+
+ def setUp(self):
+ super(TestBaremetalMigrations, self).setUp()
+
+ if self.migration is None:
+ self.migration = __import__('nova.virt.baremetal.db.migration',
+ globals(), locals(), ['INIT_VERSION'], -1)
+ self.INIT_VERSION = self.migration.INIT_VERSION
+ if self.migration_api is None:
+ temp = __import__('nova.virt.baremetal.db.sqlalchemy.migration',
+ globals(), locals(), ['versioning_api'], -1)
+ self.migration_api = temp.versioning_api
+
+ def tearDown(self):
+ super(TestBaremetalMigrations, self).tearDown()
+
+ def test_walk_versions(self):
+ for key, engine in self.engines.items():
+ self._walk_versions(engine, self.snake_walk)
+
+ def test_mysql_opportunistically(self):
+ self._test_mysql_opportunistically(
+ database='openstack_baremetal_citest')
+
+ def test_postgresql_opportunistically(self):
+ self._test_postgresql_opportunistically(
+ database='openstack_baremetal_citest')
+
+ def _prerun_002(self, engine):
+ data = [{'id': 1, 'key': 'fake-key', 'image_path': '/dev/null',
+ 'pxe_config_path': '/dev/null/', 'root_mb': 0, 'swap_mb': 0}]
+ table = get_table(engine, 'bm_deployments')
+ engine.execute(table.insert(), data)
+ return data
+
+ def _check_002(self, engine, data):
+ self.assertRaises(sqlalchemy.exc.NoSuchTableError,
+ get_table, engine, 'bm_deployments')
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
index aec6c8f67..9b38756c8 100644
--- a/nova/tests/test_notifications.py
+++ b/nova/tests/test_notifications.py
@@ -19,6 +19,8 @@
import copy
+from oslo.config import cfg
+
from nova.compute import instance_types
from nova.compute import task_states
from nova.compute import vm_states
@@ -26,7 +28,6 @@ from nova import context
from nova import db
from nova.network import api as network_api
from nova import notifications
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index 37d8c5d7d..0775bb66f 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -27,9 +27,9 @@ import tempfile
from boto import exception as boto_exception
from boto.s3 import connection as s3
+from oslo.config import cfg
from nova.objectstore import s3server
-from nova.openstack.common import cfg
from nova import test
from nova import wsgi
diff --git a/nova/tests/test_pipelib.py b/nova/tests/test_pipelib.py
index 5cd715552..4b76581a4 100644
--- a/nova/tests/test_pipelib.py
+++ b/nova/tests/test_pipelib.py
@@ -13,10 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
+
from nova.cloudpipe import pipelib
from nova import context
from nova import crypto
-from nova.openstack.common import cfg
from nova import test
from nova import utils
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 08b33e201..1ea4eea21 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -18,6 +18,8 @@
import datetime
+from oslo.config import cfg
+
from nova import compute
from nova.compute import instance_types
from nova import context
@@ -25,7 +27,6 @@ from nova import db
from nova.db.sqlalchemy import api as sqa_api
from nova.db.sqlalchemy import models as sqa_models
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 71beed51e..efe84fbb1 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -20,19 +20,19 @@
Unit Tests for remote procedure calls using queue
"""
-import mox
import sys
+import mox
+from oslo.config import cfg
+
from nova import context
from nova import db
from nova import exception
from nova import manager
-from nova.openstack.common import cfg
from nova import service
from nova import test
from nova import wsgi
-
test_service_opts = [
cfg.StrOpt("fake_manager",
default="nova.tests.test_service.FakeManager",
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
index 9e2d3560c..759b05e3b 100644
--- a/nova/tests/test_test.py
+++ b/nova/tests/test_test.py
@@ -18,7 +18,8 @@
"""Tests for the testing base code."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import rpc
from nova import test
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 0aa2a310c..940ddf6ec 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -26,10 +26,10 @@ import tempfile
import mox
import netaddr
+from oslo.config import cfg
import nova
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
from nova import utils
@@ -449,6 +449,39 @@ class GenericUtilsTestCase(test.TestCase):
self.assertEqual(fake_execute.uid, 2)
self.assertEqual(fake_execute.uid, os.getuid())
+ def test_safe_parse_xml(self):
+
+ normal_body = ("""
+ <?xml version="1.0" ?><foo>
+ <bar>
+ <v1>hey</v1>
+ <v2>there</v2>
+ </bar>
+ </foo>""").strip()
+
+ def killer_body():
+ return (("""<!DOCTYPE x [
+ <!ENTITY a "%(a)s">
+ <!ENTITY b "%(b)s">
+ <!ENTITY c "%(c)s">]>
+ <foo>
+ <bar>
+ <v1>%(d)s</v1>
+ </bar>
+ </foo>""") % {
+ 'a': 'A' * 10,
+ 'b': '&a;' * 10,
+ 'c': '&b;' * 10,
+ 'd': '&c;' * 9999,
+ }).strip()
+
+ dom = utils.safe_minidom_parse_string(normal_body)
+ self.assertEqual(normal_body, str(dom.toxml()))
+
+ self.assertRaises(ValueError,
+ utils.safe_minidom_parse_string,
+ killer_body())
+
def test_xhtml_escape(self):
self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'"))
@@ -491,6 +524,17 @@ class GenericUtilsTestCase(test.TestCase):
self.assertFalse(utils.is_valid_ipv6("foo"))
self.assertFalse(utils.is_valid_ipv6("127.0.0.1"))
+ def test_is_valid_ipv6_cidr(self):
+ self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64"))
+ self.assertTrue(utils.is_valid_ipv6_cidr(
+ "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48"))
+ self.assertTrue(utils.is_valid_ipv6_cidr(
+ "0000:0000:0000:0000:0000:0000:0000:0001/32"))
+ self.assertTrue(utils.is_valid_ipv6_cidr(
+ "0000:0000:0000:0000:0000:0000:0000:0001"))
+ self.assertFalse(utils.is_valid_ipv6_cidr("foo"))
+ self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1"))
+
def test_get_shortened_ipv6(self):
self.assertEquals("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
utils.get_shortened_ipv6(
@@ -505,6 +549,18 @@ class GenericUtilsTestCase(test.TestCase):
self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
"failure")
+ def test_get_shortened_ipv6_cidr(self):
+ self.assertEquals("2600::/64", utils.get_shortened_ipv6_cidr(
+ "2600:0000:0000:0000:0000:0000:0000:0000/64"))
+ self.assertEquals("2600::/64", utils.get_shortened_ipv6_cidr(
+ "2600::1/64"))
+ self.assertRaises(netaddr.AddrFormatError,
+ utils.get_shortened_ipv6_cidr,
+ "127.0.0.1")
+ self.assertRaises(netaddr.AddrFormatError,
+ utils.get_shortened_ipv6_cidr,
+ "failure")
+
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py
index 780e5ae12..1536af4c0 100644
--- a/nova/tests/test_versions.py
+++ b/nova/tests/test_versions.py
@@ -17,7 +17,8 @@
import __builtin__
import StringIO
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova import test
from nova import version
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index a94fdc3c5..f6e0bca38 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -28,6 +28,7 @@ from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
+from nova.virt import event as virtevent
from nova.virt import fake
LOG = logging.getLogger(__name__)
@@ -184,11 +185,16 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def setUp(self):
super(_VirtDriverTestCase, self).setUp()
+
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.connection = importutils.import_object(self.driver_module,
fake.FakeVirtAPI())
self.ctxt = test_utils.get_test_admin_context()
self.image_service = fake_image.FakeImageService()
+ def tearDown(self):
+ super(_VirtDriverTestCase, self).tearDown()
+
def _get_running_instance(self):
instance_ref = test_utils.get_test_instance()
network_info = test_utils.get_test_network_info()
@@ -544,6 +550,72 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def test_remove_from_aggregate(self):
self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
+ def test_events(self):
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ self.connection.register_event_listener(handler)
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+ event2 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_PAUSED)
+
+ self.connection.emit_event(event1)
+ self.connection.emit_event(event2)
+ want_events = [event1, event2]
+ self.assertEqual(want_events, got_events)
+
+ event3 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_RESUMED)
+ event4 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ self.connection.emit_event(event3)
+ self.connection.emit_event(event4)
+
+ want_events = [event1, event2, event3, event4]
+ self.assertEqual(want_events, got_events)
+
+ def test_event_bad_object(self):
+ # Passing in something which does not inherit
+ # from virtevent.Event
+
+ def handler(event):
+ pass
+
+ self.connection.register_event_listener(handler)
+
+ badevent = {
+ "foo": "bar"
+ }
+
+ self.assertRaises(ValueError,
+ self.connection.emit_event,
+ badevent)
+
+ def test_event_bad_callback(self):
+ # Check that if a callback raises an exception,
+ # it does not propagate back out of the
+ # 'emit_event' call
+
+ def handler(event):
+ raise Exception("Hit Me!")
+
+ self.connection.register_event_listener(handler)
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+
+ self.connection.emit_event(event1)
+
class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
def setUp(self):
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 2a484d832..84c3caa63 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -23,6 +23,8 @@ import functools
import os
import re
+from oslo.config import cfg
+
from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
@@ -31,7 +33,6 @@ from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 56be3ab71..9daa72f34 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -16,15 +16,15 @@
import platform
-import nova.context
-import nova.db
+from oslo.config import cfg
from nova.compute import instance_types
+import nova.context
+import nova.db
from nova import exception
from nova.image import glance
from nova.network import minidns
from nova.network import model as network_model
-from nova.openstack.common import cfg
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
@@ -83,7 +83,8 @@ def get_test_instance(context=None, instance_type=None):
'bridge': 'br101',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'instance_type_id': '5',
- 'system_metadata': metadata}
+ 'system_metadata': metadata,
+ 'extra_specs': {}}
instance_ref = nova.db.instance_create(context, test_instance)
return instance_ref
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index dd19f4929..f3191670d 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -81,18 +81,6 @@ def stub_out_db_instance_api(stubs):
}
return FakeModel(base_options)
- def fake_network_get_by_instance(context, instance_id):
- """Stubs out the db.network_get_by_instance method."""
-
- fields = {
- 'bridge': 'vmnet0',
- 'netmask': '255.255.255.0',
- 'gateway': '10.10.10.1',
- 'broadcast': '10.10.10.255',
- 'dns1': 'fake',
- 'vlan': 100}
- return FakeModel(fields)
-
def fake_instance_type_get_all(context, inactive=0, filters=None):
return INSTANCE_TYPES.values()
@@ -100,6 +88,5 @@ def stub_out_db_instance_api(stubs):
return INSTANCE_TYPES[name]
stubs.Set(db, 'instance_create', fake_instance_create)
- stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
diff --git a/nova/utils.py b/nova/utils.py
index 699544daa..2c7d0b427 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -36,6 +36,10 @@ import struct
import sys
import tempfile
import time
+from xml.dom import minidom
+from xml.parsers import expat
+from xml import sax
+from xml.sax import expatreader
from xml.sax import saxutils
from eventlet import event
@@ -43,8 +47,9 @@ from eventlet.green import subprocess
from eventlet import greenthread
import netaddr
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -652,6 +657,46 @@ class DynamicLoopingCall(LoopingCallBase):
return self.done
+class ProtectedExpatParser(expatreader.ExpatParser):
+ """An expat parser which disables DTD's and entities by default."""
+
+ def __init__(self, forbid_dtd=True, forbid_entities=True,
+ *args, **kwargs):
+ # Python 2.x old style class
+ expatreader.ExpatParser.__init__(self, *args, **kwargs)
+ self.forbid_dtd = forbid_dtd
+ self.forbid_entities = forbid_entities
+
+ def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
+ raise ValueError("Inline DTD forbidden")
+
+ def entity_decl(self, entityName, is_parameter_entity, value, base,
+ systemId, publicId, notationName):
+ raise ValueError("<!ENTITY> forbidden")
+
+ def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
+ # expat 1.2
+ raise ValueError("<!ENTITY> forbidden")
+
+ def reset(self):
+ expatreader.ExpatParser.reset(self)
+ if self.forbid_dtd:
+ self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
+ if self.forbid_entities:
+ self._parser.EntityDeclHandler = self.entity_decl
+ self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
+
+
+def safe_minidom_parse_string(xml_string):
+ """Parse an XML string using minidom safely.
+
+ """
+ try:
+ return minidom.parseString(xml_string, parser=ProtectedExpatParser())
+ except sax.SAXParseException as se:
+ raise expat.ExpatError()
+
+
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
@@ -896,11 +941,24 @@ def is_valid_ipv6(address):
return netaddr.valid_ipv6(address)
+def is_valid_ipv6_cidr(address):
+ try:
+ str(netaddr.IPNetwork(address, version=6).cidr)
+ return True
+ except Exception:
+ return False
+
+
def get_shortened_ipv6(address):
addr = netaddr.IPAddress(address, version=6)
return str(addr.ipv6())
+def get_shortened_ipv6_cidr(address):
+ net = netaddr.IPNetwork(address, version=6)
+ return str(net.cidr)
+
+
def is_valid_cidr(address):
"""Check if the provided ipv4 or ipv6 address is a valid
CIDR address or not"""
@@ -925,6 +983,15 @@ def is_valid_cidr(address):
return True
+def get_ip_version(network):
+ """Returns the IP version of a network (IPv4 or IPv6). Raises
+ AddrFormatError if invalid network."""
+ if netaddr.IPNetwork(network).version == 6:
+ return "IPv6"
+ elif netaddr.IPNetwork(network).version == 4:
+ return "IPv4"
+
+
def monkey_patch():
"""If the Flags.monkey_patch set as True,
this function patches a decorator
@@ -1363,7 +1430,7 @@ def check_string_length(value, name, min_length=0, max_length=None):
msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
- if len(value) < min_length:
+ if len(value) < min_length:
msg = _("%(name)s has less than %(min_length)s "
"characters.") % locals()
raise exception.InvalidInput(message=msg)
diff --git a/nova/version.py b/nova/version.py
index 722a20e16..d54c112be 100644
--- a/nova/version.py
+++ b/nova/version.py
@@ -31,7 +31,8 @@ def _load_config():
# this module
import ConfigParser
- from nova.openstack.common import cfg
+ from oslo.config import cfg
+
from nova.openstack.common import log as logging
global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
diff --git a/nova/virt/baremetal/baremetal_states.py b/nova/virt/baremetal/baremetal_states.py
index 28a41ab47..115ed091c 100644
--- a/nova/virt/baremetal/baremetal_states.py
+++ b/nova/virt/baremetal/baremetal_states.py
@@ -26,7 +26,12 @@ health.
"""
+NULL = None
+INIT = 'initializing'
ACTIVE = 'active'
BUILDING = 'building'
+DEPLOYING = 'deploying'
+DEPLOYFAIL = 'deploy failed'
+DEPLOYDONE = 'deploy complete'
DELETED = 'deleted'
ERROR = 'error'
diff --git a/nova/virt/baremetal/db/api.py b/nova/virt/baremetal/db/api.py
index 002425333..91edc05d9 100644
--- a/nova/virt/baremetal/db/api.py
+++ b/nova/virt/baremetal/db/api.py
@@ -42,9 +42,9 @@ these objects be simple dictionaries.
"""
-from nova.openstack.common import cfg
-from nova import utils
+from oslo.config import cfg
+from nova import utils
# NOTE(deva): we can't move baremetal_db_backend into an OptGroup yet
# because utils.LazyPluggable doesn't support reading from
@@ -73,6 +73,16 @@ def bm_node_get_all(context, service_host=None):
service_host=service_host)
+def bm_node_get_associated(context, service_host=None):
+ return IMPL.bm_node_get_associated(context,
+ service_host=service_host)
+
+
+def bm_node_get_unassociated(context, service_host=None):
+ return IMPL.bm_node_get_unassociated(context,
+ service_host=service_host)
+
+
def bm_node_find_free(context, service_host=None,
memory_mb=None, cpus=None, local_gb=None):
return IMPL.bm_node_find_free(context,
@@ -91,6 +101,10 @@ def bm_node_get_by_instance_uuid(context, instance_uuid):
instance_uuid)
+def bm_node_get_by_node_uuid(context, node_uuid):
+ return IMPL.bm_node_get_by_node_uuid(context, node_uuid)
+
+
def bm_node_create(context, values):
return IMPL.bm_node_create(context, values)
@@ -103,8 +117,8 @@ def bm_node_update(context, bm_node_id, values):
return IMPL.bm_node_update(context, bm_node_id, values)
-def bm_node_set_uuid_safe(context, bm_node_id, uuid):
- return IMPL.bm_node_set_uuid_safe(context, bm_node_id, uuid)
+def bm_node_associate_and_update(context, node_uuid, values):
+ return IMPL.bm_node_associate_and_update(context, node_uuid, values)
def bm_pxe_ip_create(context, address, server_address):
@@ -170,17 +184,3 @@ def bm_interface_get_by_vif_uuid(context, vif_uuid):
def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
return IMPL.bm_interface_get_all_by_bm_node_id(context, bm_node_id)
-
-
-def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
- swap_mb):
- return IMPL.bm_deployment_create(context, key, image_path,
- pxe_config_path, root_mb, swap_mb)
-
-
-def bm_deployment_get(context, dep_id):
- return IMPL.bm_deployment_get(context, dep_id)
-
-
-def bm_deployment_destroy(context, dep_id):
- return IMPL.bm_deployment_destroy(context, dep_id)
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
index 198c06256..bbfc17a35 100644
--- a/nova/virt/baremetal/db/sqlalchemy/api.py
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -20,9 +20,12 @@
"""Implementation of SQLAlchemy backend."""
+import uuid
+
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import literal_column
+import nova.context
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova.openstack.common import log as logging
@@ -59,7 +62,7 @@ def model_query(context, *args, **kwargs):
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
- if project_only and sqlalchemy_api.is_user_context(context):
+ if project_only and nova.context.is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
@@ -89,6 +92,24 @@ def bm_node_get_all(context, service_host=None):
@sqlalchemy_api.require_admin_context
+def bm_node_get_associated(context, service_host=None):
+ query = model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter(models.BareMetalNode.instance_uuid != None)
+ if service_host:
+ query = query.filter_by(service_host=service_host)
+ return query.all()
+
+
+@sqlalchemy_api.require_admin_context
+def bm_node_get_unassociated(context, service_host=None):
+ query = model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter(models.BareMetalNode.instance_uuid == None)
+ if service_host:
+ query = query.filter_by(service_host=service_host)
+ return query.all()
+
+
+@sqlalchemy_api.require_admin_context
def bm_node_find_free(context, service_host=None,
cpus=None, memory_mb=None, local_gb=None):
query = model_query(context, models.BareMetalNode, read_deleted="no")
@@ -114,7 +135,7 @@ def bm_node_get(context, bm_node_id):
first()
if not result:
- raise exception.InstanceNotFound(instance_id=bm_node_id)
+ raise exception.NodeNotFound(node_id=bm_node_id)
return result
@@ -135,7 +156,21 @@ def bm_node_get_by_instance_uuid(context, instance_uuid):
@sqlalchemy_api.require_admin_context
+def bm_node_get_by_node_uuid(context, bm_node_uuid):
+ result = model_query(context, models.BareMetalNode, read_deleted="no").\
+ filter_by(uuid=bm_node_uuid).\
+ first()
+
+ if not result:
+ raise exception.NodeNotFoundByUUID(node_uuid=bm_node_uuid)
+
+ return result
+
+
+@sqlalchemy_api.require_admin_context
def bm_node_create(context, values):
+ if not values.get('uuid'):
+ values['uuid'] = str(uuid.uuid4())
bm_node_ref = models.BareMetalNode()
bm_node_ref.update(values)
_save(bm_node_ref)
@@ -144,13 +179,16 @@ def bm_node_create(context, values):
@sqlalchemy_api.require_admin_context
def bm_node_update(context, bm_node_id, values):
- model_query(context, models.BareMetalNode, read_deleted="no").\
+ rows = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
update(values)
+ if not rows:
+ raise exception.NodeNotFound(node_id=bm_node_id)
+
@sqlalchemy_api.require_admin_context
-def bm_node_set_uuid_safe(context, bm_node_id, values):
+def bm_node_associate_and_update(context, node_uuid, values):
"""Associate an instance to a node safely
Associate an instance to a node only if that node is not yet assocated.
@@ -161,27 +199,32 @@ def bm_node_set_uuid_safe(context, bm_node_id, values):
"""
if 'instance_uuid' not in values:
raise exception.NovaException(_(
- "instance_uuid must be supplied to bm_node_set_uuid_safe"))
+ "instance_uuid must be supplied to bm_node_associate_and_update"))
session = db_session.get_session()
with session.begin():
query = model_query(context, models.BareMetalNode,
session=session, read_deleted="no").\
- filter_by(id=bm_node_id)
+ filter_by(uuid=node_uuid)
count = query.filter_by(instance_uuid=None).\
update(values, synchronize_session=False)
if count != 1:
raise exception.NovaException(_(
- "Failed to associate instance %(uuid)s to baremetal node "
- "%(id)s.") % {'id': bm_node_id,
- 'uuid': values['instance_uuid']})
+ "Failed to associate instance %(i_uuid)s to baremetal node "
+ "%(n_uuid)s.") % {'i_uuid': values['instance_uuid'],
+ 'n_uuid': node_uuid})
ref = query.first()
return ref
@sqlalchemy_api.require_admin_context
def bm_node_destroy(context, bm_node_id):
+ # First, delete all interfaces belonging to the node.
+ # Delete physically since these have unique columns.
+ model_query(context, models.BareMetalInterface, read_deleted="no").\
+ filter_by(bm_node_id=bm_node_id).\
+ delete()
model_query(context, models.BareMetalNode).\
filter_by(id=bm_node_id).\
update({'deleted': True,
@@ -244,7 +287,7 @@ def bm_pxe_ip_get_by_bm_node_id(context, bm_node_id):
first()
if not result:
- raise exception.InstanceNotFound(instance_id=bm_node_id)
+ raise exception.NodeNotFound(node_id=bm_node_id)
return result
@@ -259,7 +302,7 @@ def bm_pxe_ip_associate(context, bm_node_id):
filter_by(id=bm_node_id).\
first()
if not node_ref:
- raise exception.InstanceNotFound(instance_id=bm_node_id)
+ raise exception.NodeNotFound(node_id=bm_node_id)
# Check if the node already has a pxe_ip
ip_ref = model_query(context, models.BareMetalPxeIp,
@@ -382,37 +425,6 @@ def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
all()
if not result:
- raise exception.InstanceNotFound(instance_id=bm_node_id)
-
- return result
-
-
-@sqlalchemy_api.require_admin_context
-def bm_deployment_create(context, key, image_path, pxe_config_path, root_mb,
- swap_mb):
- ref = models.BareMetalDeployment()
- ref.key = key
- ref.image_path = image_path
- ref.pxe_config_path = pxe_config_path
- ref.root_mb = root_mb
- ref.swap_mb = swap_mb
- _save(ref)
- return ref.id
+ raise exception.NodeNotFound(node_id=bm_node_id)
-
-@sqlalchemy_api.require_admin_context
-def bm_deployment_get(context, dep_id):
- result = model_query(context, models.BareMetalDeployment,
- read_deleted="no").\
- filter_by(id=dep_id).\
- first()
return result
-
-
-@sqlalchemy_api.require_admin_context
-def bm_deployment_destroy(context, dep_id):
- model_query(context, models.BareMetalDeployment).\
- filter_by(id=dep_id).\
- update({'deleted': True,
- 'deleted_at': timeutils.utcnow(),
- 'updated_at': literal_column('updated_at')})
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py
new file mode 100644
index 000000000..2a560e24e
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py
@@ -0,0 +1,73 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Index, MetaData, Table
+from sqlalchemy import Integer, String, DateTime, Boolean
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ bm_nodes = Table('bm_nodes', meta, autoload=True)
+
+ image_path = Column('image_path', String(length=255))
+ pxe_config_path = Column('pxe_config_path', String(length=255))
+ deploy_key = Column('deploy_key', String(length=255))
+ root_mb = Column('root_mb', Integer())
+ swap_mb = Column('swap_mb', Integer())
+
+ for c in [image_path, pxe_config_path, deploy_key, root_mb, swap_mb]:
+ bm_nodes.create_column(c)
+
+ deploy_key_idx = Index('deploy_key_idx', bm_nodes.c.deploy_key)
+ deploy_key_idx.create(migrate_engine)
+
+ bm_deployments = Table('bm_deployments', meta, autoload=True)
+ bm_deployments.drop()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ bm_nodes = Table('bm_nodes', meta, autoload=True)
+
+ image_path = Column('image_path', String(length=255))
+ pxe_config_path = Column('pxe_config_path', String(length=255))
+ deploy_key = Column('deploy_key', String(length=255))
+ root_mb = Column('root_mb', Integer())
+ swap_mb = Column('swap_mb', Integer())
+
+ for c in [image_path, pxe_config_path, deploy_key, root_mb, swap_mb]:
+ bm_nodes.drop_column(c)
+
+ bm_deployments = Table('bm_deployments', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('bm_node_id', Integer),
+ Column('key', String(length=255)),
+ Column('image_path', String(length=255)),
+ Column('pxe_config_path', String(length=255)),
+ Column('root_mb', Integer),
+ Column('swap_mb', Integer),
+ mysql_engine='InnoDB',
+ )
+ bm_deployments.create()
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py
new file mode 100644
index 000000000..cc9a9316d
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, MetaData, String, Table, Index
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ t = Table('bm_nodes', meta, autoload=True)
+ uuid_col = Column('uuid', String(36))
+ t.create_column(uuid_col)
+
+ uuid_ux = Index('uuid_ux', t.c.uuid, unique=True)
+ uuid_ux.create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ t = Table('bm_nodes', meta, autoload=True)
+ uuid_col = Column('uuid', String(length=36))
+
+ t.drop_column(uuid_col)
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/004_add_instance_name_to_bm_nodes.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/004_add_instance_name_to_bm_nodes.py
new file mode 100644
index 000000000..68fbe0960
--- /dev/null
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/004_add_instance_name_to_bm_nodes.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ t = Table('bm_nodes', meta, autoload=True)
+ name_col = Column('instance_name', String(255))
+ t.create_column(name_col)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ t = Table('bm_nodes', meta, autoload=True)
+ name_col = Column('instance_name', String(length=255))
+
+ t.drop_column(name_col)
diff --git a/nova/virt/baremetal/db/sqlalchemy/models.py b/nova/virt/baremetal/db/sqlalchemy/models.py
index 7f9ffb901..756376cb7 100644
--- a/nova/virt/baremetal/db/sqlalchemy/models.py
+++ b/nova/virt/baremetal/db/sqlalchemy/models.py
@@ -34,8 +34,10 @@ class BareMetalNode(BASE, models.NovaBase):
__tablename__ = 'bm_nodes'
id = Column(Integer, primary_key=True)
+ uuid = Column(String(36))
service_host = Column(String(255))
instance_uuid = Column(String(36), nullable=True)
+ instance_name = Column(String(255), nullable=True)
cpus = Column(Integer)
memory_mb = Column(Integer)
local_gb = Column(Integer)
@@ -47,6 +49,11 @@ class BareMetalNode(BASE, models.NovaBase):
task_state = Column(String(255))
prov_vlan_id = Column(Integer)
terminal_port = Column(Integer)
+ image_path = Column(String(255), nullable=True)
+ pxe_config_path = Column(String(255), nullable=True)
+ deploy_key = Column(String(255), nullable=True)
+ root_mb = Column(Integer)
+ swap_mb = Column(Integer)
class BareMetalPxeIp(BASE, models.NovaBase):
@@ -65,13 +72,3 @@ class BareMetalInterface(BASE, models.NovaBase):
datapath_id = Column(String(255))
port_no = Column(Integer)
vif_uuid = Column(String(36), unique=True)
-
-
-class BareMetalDeployment(BASE, models.NovaBase):
- __tablename__ = 'bm_deployments'
- id = Column(Integer, primary_key=True)
- key = Column(String(255))
- image_path = Column(String(255))
- pxe_config_path = Column(String(255))
- root_mb = Column(Integer)
- swap_mb = Column(Integer)
diff --git a/nova/virt/baremetal/db/sqlalchemy/session.py b/nova/virt/baremetal/db/sqlalchemy/session.py
index 06d777354..585096c94 100644
--- a/nova/virt/baremetal/db/sqlalchemy/session.py
+++ b/nova/virt/baremetal/db/sqlalchemy/session.py
@@ -19,7 +19,8 @@
"""Session Handling for SQLAlchemy backend."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common.db.sqlalchemy import session as nova_session
from nova import paths
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 9160485a6..8dff0a785 100755
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -21,11 +21,12 @@
A driver for Bare-metal platform.
"""
+from oslo.config import cfg
+
from nova.compute import power_state
from nova import context as nova_context
from nova import exception
-from nova.openstack.common import cfg
-from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import paths
@@ -82,11 +83,6 @@ DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.NoopFirewallDriver.__name__)
-def _get_baremetal_nodes(context):
- nodes = db.bm_node_get_all(context, service_host=CONF.host)
- return nodes
-
-
def _get_baremetal_node_by_instance_uuid(instance_uuid):
ctx = nova_context.get_admin_context()
node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid)
@@ -106,6 +102,7 @@ def _update_state(context, node, instance, state):
values = {'task_state': state}
if not instance:
values['instance_uuid'] = None
+ values['instance_name'] = None
db.bm_node_update(context, node['id'], values)
@@ -172,104 +169,110 @@ class BareMetalDriver(driver.ComputeDriver):
def list_instances(self):
l = []
- ctx = nova_context.get_admin_context()
- for node in _get_baremetal_nodes(ctx):
- if not node['instance_uuid']:
- # Not currently assigned to an instance.
- continue
- try:
- inst = self.virtapi.instance_get_by_uuid(
- ctx, node['instance_uuid'])
- except exception.InstanceNotFound:
- # Assigned to an instance that no longer exists.
- LOG.warning(_("Node %(id)r assigned to instance %(uuid)r "
- "which cannot be found."),
- dict(id=node['id'], uuid=node['instance_uuid']))
- continue
- l.append(inst['name'])
+ context = nova_context.get_admin_context()
+ for node in db.bm_node_get_associated(context, service_host=CONF.host):
+ l.append(node['instance_name'])
return l
def _require_node(self, instance):
- """Get a node_id out of a manager instance dict.
+ """Get a node's uuid out of a manager instance dict.
- The compute manager is meant to know the node id, so a missing node is
+ The compute manager is meant to know the node uuid, so missing uuid
a significant issue - it may mean we've been passed someone elses data.
"""
- node_id = instance.get('node')
- if not node_id:
+ node_uuid = instance.get('node')
+ if not node_uuid:
raise exception.NovaException(_(
"Baremetal node id not supplied to driver for %r")
% instance['uuid'])
- return node_id
+ return node_uuid
+
+ def _attach_block_devices(self, instance, block_device_info):
+ block_device_mapping = driver.\
+ block_device_info_get_mapping(block_device_info)
+ for vol in block_device_mapping:
+ connection_info = vol['connection_info']
+ mountpoint = vol['mount_device']
+ self.attach_volume(
+ connection_info, instance['name'], mountpoint)
+
+ def _detach_block_devices(self, instance, block_device_info):
+ block_device_mapping = driver.\
+ block_device_info_get_mapping(block_device_info)
+ for vol in block_device_mapping:
+ connection_info = vol['connection_info']
+ mountpoint = vol['mount_device']
+ self.detach_volume(
+ connection_info, instance['name'], mountpoint)
+
+ def _start_firewall(self, instance, network_info):
+ self.firewall_driver.setup_basic_filtering(
+ instance, network_info)
+ self.firewall_driver.prepare_instance_filter(
+ instance, network_info)
+ self.firewall_driver.apply_instance_filter(
+ instance, network_info)
+
+ def _stop_firewall(self, instance, network_info):
+ self.firewall_driver.unfilter_instance(
+ instance, network_info)
def macs_for_instance(self, instance):
context = nova_context.get_admin_context()
- node_id = self._require_node(instance)
- return set(iface['address'] for iface in
- db.bm_interface_get_all_by_bm_node_id(context, node_id))
+ node_uuid = self._require_node(instance)
+ node = db.bm_node_get_by_node_uuid(context, node_uuid)
+ ifaces = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
+ return set(iface['address'] for iface in ifaces)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- node_id = self._require_node(instance)
+ node_uuid = self._require_node(instance)
# NOTE(deva): this db method will raise an exception if the node is
# already in use. We call it here to ensure no one else
# allocates this node before we begin provisioning it.
- node = db.bm_node_set_uuid_safe(context, node_id,
+ node = db.bm_node_associate_and_update(context, node_uuid,
{'instance_uuid': instance['uuid'],
+ 'instance_name': instance['hostname'],
'task_state': baremetal_states.BUILDING})
- pm = get_power_manager(node=node, instance=instance)
try:
self._plug_vifs(instance, network_info, context=context)
-
- self.firewall_driver.setup_basic_filtering(
- instance, network_info)
- self.firewall_driver.prepare_instance_filter(
- instance, network_info)
- self.firewall_driver.apply_instance_filter(
- instance, network_info)
-
- block_device_mapping = driver.\
- block_device_info_get_mapping(block_device_info)
- for vol in block_device_mapping:
- connection_info = vol['connection_info']
- mountpoint = vol['mount_device']
- self.attach_volume(
- connection_info, instance['name'], mountpoint)
-
- try:
- image_info = self.driver.cache_images(
- context, node, instance,
- admin_password=admin_password,
- image_meta=image_meta,
- injected_files=injected_files,
- network_info=network_info,
- )
- try:
- self.driver.activate_bootloader(context, node, instance)
- except Exception, e:
- self.driver.deactivate_bootloader(context, node, instance)
- raise e
- except Exception, e:
+ self._attach_block_devices(instance, block_device_info)
+ self._start_firewall(instance, network_info)
+
+ self.driver.cache_images(
+ context, node, instance,
+ admin_password=admin_password,
+ image_meta=image_meta,
+ injected_files=injected_files,
+ network_info=network_info,
+ )
+ self.driver.activate_bootloader(context, node, instance)
+ self.power_on(instance, node)
+ self.driver.activate_node(context, node, instance)
+ _update_state(context, node, instance, baremetal_states.ACTIVE)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("Error deploying instance %(instance)s "
+ "on baremetal node %(node)s.") %
+ {'instance': instance['uuid'],
+ 'node': node['uuid']})
+
+ # Do not set instance=None yet. This prevents another
+ # spawn() while we are cleaning up.
+ _update_state(context, node, instance, baremetal_states.ERROR)
+
+ self.driver.deactivate_node(context, node, instance)
+ self.power_off(instance, node)
+ self.driver.deactivate_bootloader(context, node, instance)
self.driver.destroy_images(context, node, instance)
- raise e
- except Exception, e:
- # TODO(deva): do network and volume cleanup here
- raise e
- else:
- # NOTE(deva): pm.activate_node should not raise exceptions.
- # We check its success in "finally" block
- pm.activate_node()
- pm.start_console()
- finally:
- if pm.state != baremetal_states.ACTIVE:
- pm.state = baremetal_states.ERROR
- try:
- _update_state(context, node, instance, pm.state)
- except db_session.DBError, e:
- LOG.warning(_("Failed to update state record for "
- "baremetal node %s") % instance['uuid'])
+
+ self._detach_block_devices(instance, block_device_info)
+ self._stop_firewall(instance, network_info)
+ self._unplug_vifs(instance, network_info)
+
+ _update_state(context, node, None, baremetal_states.DELETED)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
@@ -277,61 +280,67 @@ class BareMetalDriver(driver.ComputeDriver):
ctx = nova_context.get_admin_context()
pm = get_power_manager(node=node, instance=instance)
state = pm.reboot_node()
+ if pm.state != baremetal_states.ACTIVE:
+ raise exception.InstanceRebootFailure(_(
+ "Baremetal power manager failed to restart node "
+ "for instance %r") % instance['uuid'])
_update_state(ctx, node, instance, state)
def destroy(self, instance, network_info, block_device_info=None):
- ctx = nova_context.get_admin_context()
+ context = nova_context.get_admin_context()
try:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
except exception.InstanceNotFound:
- # TODO(deva): refactor so that dangling files can be cleaned
- # up even after a failed boot or delete
- LOG.warning(_("Delete called on non-existing instance %s")
+ LOG.warning(_("Destroy called on non-existing instance %s")
% instance['uuid'])
return
- self.driver.deactivate_node(ctx, node, instance)
-
- pm = get_power_manager(node=node, instance=instance)
-
- pm.stop_console()
-
- ## power off the node
- state = pm.deactivate_node()
-
- ## cleanup volumes
- # NOTE(vish): we disconnect from volumes regardless
- block_device_mapping = driver.block_device_info_get_mapping(
- block_device_info)
- for vol in block_device_mapping:
- connection_info = vol['connection_info']
- mountpoint = vol['mount_device']
- self.detach_volume(connection_info, instance['name'], mountpoint)
-
- self.driver.deactivate_bootloader(ctx, node, instance)
-
- self.driver.destroy_images(ctx, node, instance)
-
- # stop firewall
- self.firewall_driver.unfilter_instance(instance,
- network_info=network_info)
-
- self._unplug_vifs(instance, network_info)
+ try:
+ self.driver.deactivate_node(context, node, instance)
+ self.power_off(instance, node)
+ self.driver.deactivate_bootloader(context, node, instance)
+ self.driver.destroy_images(context, node, instance)
- _update_state(ctx, node, None, state)
+ self._detach_block_devices(instance, block_device_info)
+ self._stop_firewall(instance, network_info)
+ self._unplug_vifs(instance, network_info)
- def power_off(self, instance):
+ _update_state(context, node, None, baremetal_states.DELETED)
+ except Exception, e:
+ with excutils.save_and_reraise_exception():
+ try:
+ LOG.error(_("Error from baremetal driver "
+ "during destroy: %s") % e)
+ _update_state(context, node, instance,
+ baremetal_states.ERROR)
+ except Exception:
+ LOG.error(_("Error while recording destroy failure in "
+ "baremetal database: %s") % e)
+
+ def power_off(self, instance, node=None):
"""Power off the specified instance."""
- node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ if not node:
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.deactivate_node()
+ if pm.state != baremetal_states.DELETED:
+ raise exception.InstancePowerOffFailure(_(
+ "Baremetal power manager failed to stop node "
+ "for instance %r") % instance['uuid'])
+ pm.stop_console()
- def power_on(self, instance):
+ def power_on(self, instance, node=None):
"""Power on the specified instance."""
- node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
+ if not node:
+ node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.activate_node()
+ if pm.state != baremetal_states.ACTIVE:
+ raise exception.InstancePowerOnFailure(_(
+ "Baremetal power manager failed to start node "
+ "for instance %r") % instance['uuid'])
+ pm.start_console()
def get_volume_connector(self, instance):
return self.volume_driver.get_volume_connector(instance)
@@ -391,7 +400,7 @@ class BareMetalDriver(driver.ComputeDriver):
'local_gb_used': local_gb_used,
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
- 'hypervisor_hostname': str(node['id']),
+ 'hypervisor_hostname': str(node['uuid']),
'cpu_info': 'baremetal cpu',
}
return dic
@@ -401,7 +410,7 @@ class BareMetalDriver(driver.ComputeDriver):
def get_available_resource(self, nodename):
context = nova_context.get_admin_context()
- node = db.bm_node_get(context, nodename)
+ node = db.bm_node_get_by_node_uuid(context, nodename)
dic = self._node_resource(node)
return dic
@@ -421,7 +430,7 @@ class BareMetalDriver(driver.ComputeDriver):
service_host=CONF.host)
for node in nodes:
res = self._node_resource(node)
- nodename = str(node['id'])
+ nodename = str(node['uuid'])
data = {}
data['vcpus'] = res['vcpus']
data['vcpus_used'] = res['vcpus_used']
@@ -472,4 +481,5 @@ class BareMetalDriver(driver.ComputeDriver):
def get_available_nodes(self):
context = nova_context.get_admin_context()
- return [str(n['id']) for n in _get_baremetal_nodes(context)]
+ return [str(n['uuid']) for n in
+ db.bm_node_get_unassociated(context, service_host=CONF.host)]
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
index 5d4a87625..7cc272c32 100644
--- a/nova/virt/baremetal/ipmi.py
+++ b/nova/virt/baremetal/ipmi.py
@@ -25,8 +25,9 @@ import os
import stat
import tempfile
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import paths
from nova import utils
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 14bdab2b0..813f95c05 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -20,19 +20,23 @@
Class for PXE bare-metal nodes.
"""
+import datetime
import os
+from oslo.config import cfg
+
from nova.compute import instance_types
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+from nova import utils
+from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
-
pxe_opts = [
cfg.StrOpt('deploy_kernel',
help='Default kernel image ID used in deployment phase'),
@@ -47,6 +51,9 @@ pxe_opts = [
cfg.StrOpt('pxe_config_template',
default='$pybasedir/nova/virt/baremetal/pxe_config.template',
help='Template file for PXE configuration'),
+ cfg.IntOpt('pxe_deploy_timeout',
+ help='Timeout for PXE deployments. Default: 0 (unlimited)',
+ default=0),
]
LOG = logging.getLogger(__name__)
@@ -374,16 +381,14 @@ class PXE(base.NodeDriver):
deployment_key = bm_utils.random_alnum(32)
deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
- deployment_id = db.bm_deployment_create(
- context,
- deployment_key,
- image_file_path,
- pxe_config_file_path,
- root_mb,
- swap_mb
- )
+ db.bm_node_update(context, node['id'],
+ {'deploy_key': deployment_key,
+ 'image_path': image_file_path,
+ 'pxe_config_path': pxe_config_file_path,
+ 'root_mb': root_mb,
+ 'swap_mb': swap_mb})
pxe_config = build_pxe_config(
- deployment_id,
+ node['id'],
deployment_key,
deployment_iscsi_iqn,
image_info['deploy_kernel'][1],
@@ -402,6 +407,16 @@ class PXE(base.NodeDriver):
def deactivate_bootloader(self, context, node, instance):
"""Delete PXE bootloader images and config."""
try:
+ db.bm_node_update(context, node['id'],
+ {'deploy_key': None,
+ 'image_path': None,
+ 'pxe_config_path': None,
+ 'root_mb': 0,
+ 'swap_mb': 0})
+ except exception.NodeNotFound:
+ pass
+
+ try:
image_info = get_tftp_image_info(instance)
except exception.NovaException:
pass
@@ -423,7 +438,51 @@ class PXE(base.NodeDriver):
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
def activate_node(self, context, node, instance):
- pass
+ """Wait for PXE deployment to complete."""
+
+ locals = {'error': '', 'started': False}
+
+ def _wait_for_deploy():
+ """Called at an interval until the deployment completes."""
+ try:
+ row = db.bm_node_get(context, node['id'])
+ if instance['uuid'] != row.get('instance_uuid'):
+ locals['error'] = _("Node associated with another instance"
+ " while waiting for deploy of %s")
+ raise utils.LoopingCallDone()
+
+ status = row.get('task_state')
+ if (status == baremetal_states.DEPLOYING
+ and locals['started'] == False):
+ LOG.info(_("PXE deploy started for instance %s")
+ % instance['uuid'])
+ locals['started'] = True
+ elif status in (baremetal_states.DEPLOYDONE,
+ baremetal_states.ACTIVE):
+ LOG.info(_("PXE deploy completed for instance %s")
+ % instance['uuid'])
+ raise utils.LoopingCallDone()
+ elif status == baremetal_states.DEPLOYFAIL:
+ locals['error'] = _("PXE deploy failed for instance %s")
+ except exception.NodeNotFound:
+ locals['error'] = _("Baremetal node deleted while waiting "
+ "for deployment of instance %s")
+
+ if (CONF.baremetal.pxe_deploy_timeout and
+ timeutils.utcnow() > expiration):
+ locals['error'] = _("Timeout reached while waiting for "
+ "PXE deploy of instance %s")
+ if locals['error']:
+ raise utils.LoopingCallDone()
+
+ expiration = timeutils.utcnow() + datetime.timedelta(
+ seconds=CONF.baremetal.pxe_deploy_timeout)
+ timer = utils.FixedIntervalLoopingCall(_wait_for_deploy)
+ timer.start(interval=1).wait()
+
+ if locals['error']:
+ raise exception.InstanceDeployFailure(
+ locals['error'] % instance['uuid'])
def deactivate_node(self, context, node, instance):
pass
diff --git a/nova/virt/baremetal/vif_driver.py b/nova/virt/baremetal/vif_driver.py
index 08e68c250..627155f9c 100644
--- a/nova/virt/baremetal/vif_driver.py
+++ b/nova/virt/baremetal/vif_driver.py
@@ -13,9 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
+
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.baremetal import db as bmdb
diff --git a/nova/virt/baremetal/virtual_power_driver.py b/nova/virt/baremetal/virtual_power_driver.py
index f77579dba..a18a2f9d3 100644
--- a/nova/virt/baremetal/virtual_power_driver.py
+++ b/nova/virt/baremetal/virtual_power_driver.py
@@ -17,9 +17,9 @@
#
# Virtual power driver
+from oslo.config import cfg
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index e92325b97..cc23f9064 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -18,9 +18,10 @@
import re
+from oslo.config import cfg
+
from nova import context as nova_context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 886136460..658710e26 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -21,8 +21,9 @@ import os
import shutil
import tempfile
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index d4f8b6cc0..0880af1cb 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -32,8 +32,9 @@ import tempfile
if os.name != 'nt':
import crypt
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import paths
diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py
index 1d9d1fc20..3690f6ddf 100644
--- a/nova/virt/disk/mount/api.py
+++ b/nova/virt/disk/mount/api.py
@@ -211,11 +211,11 @@ class Mount(object):
finally:
if not status:
LOG.debug(_("Fail to mount, tearing back down"))
- self.do_umount()
+ self.do_teardown()
return status
def do_umount(self):
- """Call the unmnt, unmap and unget operations."""
+ """Call the unmnt operation."""
if self.mounted:
self.unmnt_dev()
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
index 72302fb91..d6414e96c 100644
--- a/nova/virt/disk/mount/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -20,7 +20,8 @@ import random
import re
import time
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk.mount import api
diff --git a/nova/virt/disk/vfs/localfs.py b/nova/virt/disk/vfs/localfs.py
index 9efa6798b..34c52dedd 100644
--- a/nova/virt/disk/vfs/localfs.py
+++ b/nova/virt/disk/vfs/localfs.py
@@ -85,7 +85,7 @@ class VFSLocalFS(vfs.VFS):
def teardown(self):
try:
if self.mount:
- self.mount.do_umount()
+ self.mount.do_teardown()
except Exception, e:
LOG.debug(_("Failed to unmount %(imgdir)s: %(ex)s") %
{'imgdir': self.imgdir, 'ex': str(e)})
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index ba0dfbafe..bc53aa88a 100755
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -24,10 +24,12 @@ Driver base-classes:
import sys
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
+from nova.virt import event as virtevent
driver_opts = [
cfg.StrOpt('compute_driver',
@@ -39,6 +41,11 @@ driver_opts = [
default=None,
help='The default format an ephemeral_volume will be '
'formatted with on creation.'),
+ cfg.StrOpt('preallocate_images',
+ default='none',
+ help='VM image preallocation mode: '
+ '"none" => no storage provisioning is done up front, '
+ '"space" => storage is fully allocated at instance start'),
cfg.BoolOpt('use_cow_images',
default=True,
help='Whether to use cow images'),
@@ -125,6 +132,7 @@ class ComputeDriver(object):
def __init__(self, virtapi):
self.virtapi = virtapi
+ self._compute_event_callback = None
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
@@ -302,6 +310,14 @@ class ComputeDriver(object):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
+ def attach_interface(self, instance, image_meta, network_info):
+ """Attach an interface to the instance."""
+ raise NotImplementedError()
+
+ def detach_interface(self, instance, network_info):
+ """Detach an interface from the instance."""
+ raise NotImplementedError()
+
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
@@ -850,6 +866,38 @@ class ComputeDriver(object):
"""
return False
+ def register_event_listener(self, callback):
+ """Register a callback to receive events.
+
+ Register a callback to receive asynchronous event
+ notifications from hypervisors. The callback will
+ be invoked with a single parameter, which will be
+ an instance of the nova.virt.event.Event class."""
+
+ self._compute_event_callback = callback
+
+ def emit_event(self, event):
+ """Dispatches an event to the compute manager.
+
+ Invokes the event callback registered by the
+ compute manager to dispatch the event. This
+ must only be invoked from a green thread."""
+
+ if not self._compute_event_callback:
+ LOG.debug("Discarding event %s" % str(event))
+ return
+
+ if not isinstance(event, virtevent.Event):
+ raise ValueError(
+ _("Event must be an instance of nova.virt.event.Event"))
+
+ try:
+ LOG.debug("Emitting event %s" % str(event))
+ self._compute_event_callback(event)
+ except Exception, ex:
+ LOG.error(_("Exception dispatching event %(event)s: %(ex)s")
+ % locals())
+
def load_compute_driver(virtapi, compute_driver=None):
"""Load a compute driver module.
diff --git a/nova/virt/event.py b/nova/virt/event.py
new file mode 100644
index 000000000..684986f8a
--- /dev/null
+++ b/nova/virt/event.py
@@ -0,0 +1,85 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Asynchronous event notifications from virtualization drivers.
+
+This module defines a set of classes representing data for
+various asynchronous events that can occur in a virtualization
+driver.
+"""
+
+import time
+
+EVENT_LIFECYCLE_STARTED = 0
+EVENT_LIFECYCLE_STOPPED = 1
+EVENT_LIFECYCLE_PAUSED = 2
+EVENT_LIFECYCLE_RESUMED = 3
+
+
+class Event(object):
+ """Base class for all events emitted by a hypervisor.
+
+ All events emitted by a virtualization driver are
+ subclasses of this base object. The only generic
+ information recorded in the base class is a timestamp
+ indicating when the event first occurred. The timestamp
+ is recorded as fractional seconds since the UNIX epoch.
+ """
+
+ def __init__(self, timestamp=None):
+ if timestamp is None:
+ self.timestamp = time.time()
+ else:
+ self.timestamp = timestamp
+
+ def get_timestamp(self):
+ return self.timestamp
+
+
+class InstanceEvent(Event):
+ """Base class for all instance events.
+
+ All events emitted by a virtualization driver which
+ are associated with a virtual domain instance are
+ subclasses of this base object. This object records
+ the UUID associated with the instance."""
+
+ def __init__(self, uuid, timestamp=None):
+ super(InstanceEvent, self).__init__(timestamp)
+
+ self.uuid = uuid
+
+ def get_instance_uuid(self):
+ return self.uuid
+
+
+class LifecycleEvent(InstanceEvent):
+ """Class for instance lifecycle state change events.
+
+ When a virtual domain instance lifecycle state changes,
+ events of this class are emitted. The EVENT_LIFECYCLE_XX
+ constants define why the lifecycle change occurred. This
+ event allows detection of an instance starting/stopping
+ without the need for polling."""
+
+ def __init__(self, uuid, transition, timestamp=None):
+ super(LifecycleEvent, self).__init__(uuid, timestamp)
+
+ self.transition = transition
+
+ def get_transition(self):
+ return self.transition
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 30a5fc758..5545dcf96 100755
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -102,6 +102,7 @@ class FakeDriver(driver.ComputeDriver):
'hypervisor_hostname': 'fake-mini',
}
self._mounts = {}
+ self._interfaces = {}
def init_host(self, host):
return
@@ -222,6 +223,19 @@ class FakeDriver(driver.ComputeDriver):
pass
return True
+ def attach_interface(self, instance, image_meta, network_info):
+ for (network, mapping) in network_info:
+ if mapping['vif_uuid'] in self._interfaces:
+ raise exception.InterfaceAttachFailed('duplicate')
+ self._interfaces[mapping['vif_uuid']] = mapping
+
+ def detach_interface(self, instance, network_info):
+ for (network, mapping) in network_info:
+ try:
+ del self._interfaces[mapping['vif_uuid']]
+ except KeyError:
+ raise exception.InterfaceDetachFailed('not attached')
+
def get_info(self, instance):
if instance['name'] not in self.instances:
raise exception.InstanceNotFound(instance_id=instance['name'])
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index ad38cd9a4..a36beb7f0 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -17,16 +17,17 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
+
+from nova import conductor
from nova import context
from nova import network
from nova.network import linux_net
-from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.virt import netutils
-
LOG = logging.getLogger(__name__)
firewall_opts = [
@@ -398,9 +399,11 @@ class IptablesFirewallDriver(FirewallDriver):
# and should be the only one making
# making rpc calls.
nw_api = network.API()
+ capi = conductor.API()
for instance in rule['grantee_group']['instances']:
nw_info = nw_api.get_instance_nw_info(ctxt,
- instance)
+ instance,
+ capi)
ips = [ip['address']
for ip in nw_info.fixed_ips()
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index 5b515a0f1..32221843f 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -28,8 +28,9 @@ if sys.platform == 'win32':
import _winreg
import wmi
+from oslo.config import cfg
+
from nova import block_device
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index b3b38aab9..4e5cfc6a4 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -21,7 +21,8 @@ Management class for host operations.
import os
import platform
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 108413b0b..dca5de0cc 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -18,7 +18,8 @@
"""
Management class for live migration VM operations.
"""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.virt.hyperv import imagecache
diff --git a/nova/virt/hyperv/pathutils.py b/nova/virt/hyperv/pathutils.py
index 05cfffaac..1297cd1ed 100644
--- a/nova/virt/hyperv/pathutils.py
+++ b/nova/virt/hyperv/pathutils.py
@@ -18,7 +18,8 @@
import os
import shutil
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index ab7c96943..5bed46665 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -20,9 +20,10 @@ Management class for VM snapshot operations.
"""
import os
+from oslo.config import cfg
+
from nova.compute import task_states
from nova.image import glance
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py
index cfe7c6a4c..d49441cee 100644
--- a/nova/virt/hyperv/vif.py
+++ b/nova/virt/hyperv/vif.py
@@ -18,12 +18,12 @@
import abc
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import vmutils
-
hyperv_opts = [
cfg.StrOpt('vswitch_name',
default=None,
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index c8acc0fa1..c7a4e5468 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -21,9 +21,10 @@ Management class for basic VM operations.
"""
import os
+from oslo.config import cfg
+
from nova.api.metadata import base as instance_metadata
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index d80144b65..6536efe1e 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -27,8 +27,9 @@ import uuid
if sys.platform == 'win32':
import wmi
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index c665cbeb7..0d12b93a2 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -21,7 +21,8 @@ Management class for Storage-related functions (attach, detach, etc).
"""
import time
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostutils
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 713ace258..6d623fca7 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -24,8 +24,8 @@ and storage repositories
import time
from eventlet.green import subprocess
+from oslo.config import cfg
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import vmutils
diff --git a/nova/virt/hyperv/volumeutilsv2.py b/nova/virt/hyperv/volumeutilsv2.py
index 8322d31d3..2dc86c2e0 100644
--- a/nova/virt/hyperv/volumeutilsv2.py
+++ b/nova/virt/hyperv/volumeutilsv2.py
@@ -26,7 +26,8 @@ import time
if sys.platform == 'win32':
import wmi
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.virt.hyperv import basevolumeutils
diff --git a/nova/virt/images.py b/nova/virt/images.py
index a5c960486..50d89bfb8 100755
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -24,13 +24,13 @@ Handling of VM disk images.
import os
import re
+from oslo.config import cfg
+
from nova import exception
from nova.image import glance
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-
LOG = logging.getLogger(__name__)
image_opts = [
@@ -225,6 +225,7 @@ def fetch_to_raw(context, image_href, path, user_id, project_id):
LOG.debug("%s was %s, converting to raw" % (image_href, fmt))
with utils.remove_path_on_error(staged):
convert_image(path_tmp, staged, 'raw')
+ os.unlink(path_tmp)
data = qemu_img_info(staged)
if data.file_format != "raw":
@@ -233,6 +234,5 @@ def fetch_to_raw(context, image_href, path, user_id, project_id):
data.file_format)
os.rename(staged, path)
-
else:
os.rename(path_tmp, path)
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index 0098410cd..09e3809d9 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -116,6 +116,8 @@ def get_dev_prefix_for_disk_bus(disk_bus):
return "sd"
elif disk_bus == "uml":
return "ubd"
+ elif disk_bus == "lxc":
+ return None
else:
raise exception.NovaException(
_("Unable to determine disk prefix for %s") %
@@ -150,6 +152,9 @@ def find_disk_dev_for_disk_bus(mapping, bus, last_device=False):
"""
dev_prefix = get_dev_prefix_for_disk_bus(bus)
+ if dev_prefix is None:
+ return None
+
max_dev = get_dev_count_for_disk_bus(bus)
if last_device:
devs = [max_dev - 1]
@@ -172,6 +177,7 @@ def is_disk_bus_valid_for_virt(virt_type, disk_bus):
'kvm': ['virtio', 'scsi', 'ide', 'usb'],
'xen': ['xen', 'ide'],
'uml': ['uml'],
+ 'lxc': ['lxc'],
}
if virt_type not in valid_bus:
@@ -207,6 +213,8 @@ def get_disk_bus_for_device_type(virt_type,
if virt_type == "uml":
if device_type == "disk":
return "uml"
+ elif virt_type == "lxc":
+ return "lxc"
elif virt_type == "xen":
if device_type == "cdrom":
return "ide"
@@ -293,6 +301,19 @@ def get_disk_mapping(virt_type, instance,
mapping = {}
if virt_type == "lxc":
+ # NOTE(zul): This information is not used by the libvirt driver
+ # however we need to populate mapping so the image can be
+ # created when the instance is started. This can
+ # be removed when we convert LXC to use block devices.
+ root_disk_bus = disk_bus
+ root_device_type = 'disk'
+
+ root_info = get_next_disk_info(mapping,
+ root_disk_bus,
+ root_device_type)
+ mapping['root'] = root_info
+ mapping['disk'] = root_info
+
return mapping
if rescue:
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index d6ef3fca9..729d27ea4 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -468,6 +468,12 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
self.auth_secret_type = None
self.auth_secret_uuid = None
self.serial = None
+ self.disk_read_bytes_sec = None
+ self.disk_read_iops_sec = None
+ self.disk_write_bytes_sec = None
+ self.disk_write_iops_sec = None
+ self.disk_total_bytes_sec = None
+ self.disk_total_iops_sec = None
def format_dom(self):
dev = super(LibvirtConfigGuestDisk, self).format_dom()
@@ -512,6 +518,34 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
if self.serial is not None:
dev.append(self._text_node("serial", self.serial))
+ iotune = etree.Element("iotune")
+
+ if self.disk_read_bytes_sec is not None:
+ iotune.append(self._text_node("read_bytes_sec",
+ self.disk_read_bytes_sec))
+
+ if self.disk_read_iops_sec is not None:
+ iotune.append(self._text_node("read_iops_sec",
+ self.disk_read_iops_sec))
+
+ if self.disk_write_bytes_sec is not None:
+ iotune.append(self._text_node("write_bytes_sec",
+ self.disk_write_bytes_sec))
+
+ if self.disk_write_iops_sec is not None:
+ iotune.append(self._text_node("write_iops_sec",
+ self.disk_write_iops_sec))
+
+ if self.disk_total_bytes_sec is not None:
+ iotune.append(self._text_node("total_bytes_sec",
+ self.disk_total_bytes_sec))
+
+ if self.disk_total_iops_sec is not None:
+ iotune.append(self._text_node("total_iops_sec",
+ self.disk_total_iops_sec))
+
+ if len(iotune) > 0:
+ dev.append(iotune)
return dev
@@ -555,6 +589,12 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
self.filtername = None
self.filterparams = []
self.driver_name = None
+ self.vif_inbound_peak = None
+ self.vif_inbound_burst = None
+ self.vif_inbound_average = None
+ self.vif_outbound_peak = None
+ self.vif_outbound_burst = None
+ self.vif_outbound_average = None
def format_dom(self):
dev = super(LibvirtConfigGuestInterface, self).format_dom()
@@ -595,6 +635,27 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
value=p['value']))
dev.append(filter)
+ if self.vif_inbound_average or self.vif_outbound_average:
+ bandwidth = etree.Element("bandwidth")
+ if self.vif_inbound_average is not None:
+ vif_inbound = etree.Element("inbound",
+ average=str(self.vif_inbound_average))
+ if self.vif_inbound_peak is not None:
+ vif_inbound.set("peak", str(self.vif_inbound_peak))
+ if self.vif_inbound_burst is not None:
+ vif_inbound.set("burst", str(self.vif_inbound_burst))
+ bandwidth.append(vif_inbound)
+
+ if self.vif_outbound_average is not None:
+ vif_outbound = etree.Element("outbound",
+ average=str(self.vif_outbound_average))
+ if self.vif_outbound_peak is not None:
+ vif_outbound.set("peak", str(self.vif_outbound_peak))
+ if self.vif_outbound_burst is not None:
+ vif_outbound.set("burst", str(self.vif_outbound_burst))
+ bandwidth.append(vif_outbound)
+ dev.append(bandwidth)
+
return dev
def add_filter_param(self, key, value):
@@ -729,6 +790,9 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.memory = 1024 * 1024 * 500
self.vcpus = 1
self.cpu = None
+ self.cpu_shares = None
+ self.cpu_quota = None
+ self.cpu_period = None
self.acpi = False
self.apic = False
self.clock = None
@@ -780,6 +844,17 @@ class LibvirtConfigGuest(LibvirtConfigObject):
features.append(etree.Element("apic"))
root.append(features)
+ def _format_cputune(self, root):
+ cputune = etree.Element("cputune")
+ if self.cpu_shares is not None:
+ cputune.append(self._text_node("shares", self.cpu_shares))
+ if self.cpu_quota is not None:
+ cputune.append(self._text_node("quota", self.cpu_quota))
+ if self.cpu_period is not None:
+ cputune.append(self._text_node("period", self.cpu_period))
+ if len(cputune) > 0:
+ root.append(cputune)
+
def _format_devices(self, root):
if len(self.devices) == 0:
return
@@ -800,6 +875,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self._format_os(root)
self._format_features(root)
+ self._format_cputune(root)
if self.clock is not None:
root.append(self.clock.format_dom())
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
index 176eeef4c..0625d407b 100644
--- a/nova/virt/libvirt/designer.py
+++ b/nova/virt/libvirt/designer.py
@@ -21,8 +21,6 @@ This module provides helper APIs for populating the config.py
classes based on common operational needs / policies
"""
-from nova.virt import netutils
-
def set_vif_guest_frontend_config(conf, mac, model, driver):
"""Populate a LibvirtConfigGuestInterface instance
@@ -102,32 +100,12 @@ def set_vif_host_backend_802qbh_config(conf, devname, profileid,
conf.target_dev = tapname
-def set_vif_host_backend_filter_config(conf, name,
- primary_addr,
- dhcp_server=None,
- ra_server=None,
- allow_same_net=False,
- ipv4_cidr=None,
- ipv6_cidr=None):
- """Populate a LibvirtConfigGuestInterface instance
- with host backend details for traffic filtering"""
-
- conf.filtername = name
- conf.add_filter_param("IP", primary_addr)
-
- if dhcp_server:
- conf.add_filter_param("DHCPSERVER", dhcp_server)
-
- if ra_server:
- conf.add_filter_param("RASERVER", ra_server)
-
- if allow_same_net:
- if ipv4_cidr:
- net, mask = netutils.get_net_and_mask(ipv4_cidr)
- conf.add_filter_param("PROJNET", net)
- conf.add_filter_param("PROJMASK", mask)
+def set_vif_bandwidth_config(conf, extra_specs):
+ """Config vif inbound/outbound bandwidth limit."""
- if ipv6_cidr:
- net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
- conf.add_filter_param("PROJNET6", net)
- conf.add_filter_param("PROJMASK6", prefix)
+ bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',
+ 'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',
+ 'vif_outbound_burst']
+ for key, value in extra_specs.iteritems():
+ if key in bandwidth_items:
+ setattr(conf, key, value)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 7282c9e61..bcbd1de87 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -6,6 +6,7 @@
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
+# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -40,19 +41,25 @@ Supports KVM, LXC, QEMU, UML, and XEN.
"""
import errno
+import eventlet
import functools
import glob
import hashlib
import os
import shutil
+import socket
import sys
import tempfile
import time
import uuid
+from eventlet import greenio
from eventlet import greenthread
+from eventlet import patcher
from eventlet import tpool
+from eventlet import util as eventlet_util
from lxml import etree
+from oslo.config import cfg
from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
@@ -63,7 +70,6 @@ from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.image import glance
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
@@ -75,6 +81,7 @@ from nova import version
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
+from nova.virt import event as virtevent
from nova.virt import firewall
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
@@ -84,6 +91,9 @@ from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
+native_threading = patcher.original("threading")
+native_Queue = patcher.original("Queue")
+
libvirt = None
LOG = logging.getLogger(__name__)
@@ -154,7 +164,11 @@ libvirt_opts = [
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs='
- 'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver'
+ 'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
+ 'fibre_channel=nova.virt.libvirt.volume.'
+ 'LibvirtFibreChannelVolumeDriver',
+ 'scality='
+ 'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
@@ -281,6 +295,8 @@ class LibvirtDriver(driver.ComputeDriver):
self._host_state = None
self._initiator = None
+ self._fc_wwnns = None
+ self._fc_wwpns = None
self._wrapped_conn = None
self._caps = None
self.read_only = read_only
@@ -296,6 +312,7 @@ class LibvirtDriver(driver.ComputeDriver):
CONF.libvirt_volume_drivers, self)
self._host_state = None
+ self._event_queue = None
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
@@ -346,7 +363,146 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception:
return False
+ def _native_thread(self):
+ """Receives async events coming in from libvirtd.
+
+ This is a native thread which runs the default
+ libvirt event loop implementation. This processes
+ any incoming async events from libvirtd and queues
+ them for later dispatch. This thread is only
+ permitted to use libvirt python APIs, and the
+ driver._queue_event method. In particular any use
+ of logging is forbidden, since it will confuse
+ eventlet's greenthread integration"""
+
+ while True:
+ libvirt.virEventRunDefaultImpl()
+
+ def _dispatch_thread(self):
+ """Dispatches async events coming in from libvirtd.
+
+ This is a green thread which waits for events to
+ arrive from the libvirt event loop thread. This
+ then dispatches the events to the compute manager."""
+
+ while True:
+ self._dispatch_events()
+
+ @staticmethod
+ def _event_lifecycle_callback(conn, dom, event, detail, opaque):
+ """Receives lifecycle events from libvirt.
+
+ NB: this method is executing in a native thread, not
+ an eventlet coroutine. It can only invoke other libvirt
+ APIs, or use self._queue_event(). Any use of logging APIs
+ in particular is forbidden."""
+
+ self = opaque
+
+ uuid = dom.UUIDString()
+ transition = None
+ if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
+ transition = virtevent.EVENT_LIFECYCLE_STOPPED
+ elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
+ transition = virtevent.EVENT_LIFECYCLE_STARTED
+ elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
+ transition = virtevent.EVENT_LIFECYCLE_PAUSED
+ elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
+ transition = virtevent.EVENT_LIFECYCLE_RESUMED
+
+ if transition is not None:
+ self._queue_event(virtevent.LifecycleEvent(uuid, transition))
+
+ def _queue_event(self, event):
+ """Puts an event on the queue for dispatch.
+
+ This method is called by the native event thread to
+ put events on the queue for later dispatch by the
+ green thread."""
+
+ if self._event_queue is None:
+ LOG.debug("Event loop thread is not active, "
+ "discarding event %s" % event)
+ return
+
+ # Queue the event...
+ self._event_queue.put(event)
+
+ # ...then wakeup the green thread to dispatch it
+ c = ' '.encode()
+ self._event_notify_send.write(c)
+ self._event_notify_send.flush()
+
+ def _dispatch_events(self):
+ """Wait for & dispatch events from native thread
+
+ Blocks until native thread indicates some events
+ are ready. Then dispatches all queued events."""
+
+ # Wait to be notified that there are some
+ # events pending
+ try:
+ _c = self._event_notify_recv.read(1)
+ assert _c
+ except ValueError:
+ return # will be raised when pipe is closed
+
+ # Process as many events as possible without
+ # blocking
+ while not self._event_queue.empty():
+ try:
+ event = self._event_queue.get(block=False)
+ self.emit_event(event)
+ except native_Queue.Empty:
+ pass
+
+ def _init_events_pipe(self):
+ """Create a self-pipe for the native thread to synchronize on.
+
+ This code is taken from the eventlet tpool module, under terms
+ of the Apache License v2.0."""
+
+ self._event_queue = native_Queue.Queue()
+ try:
+ rpipe, wpipe = os.pipe()
+ self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
+ self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
+ except (ImportError, NotImplementedError):
+ # This is Windows compatibility -- use a socket instead
+ # of a pipe because pipes don't really exist on Windows.
+ sock = eventlet_util.__original_socket__(socket.AF_INET,
+ socket.SOCK_STREAM)
+ sock.bind(('localhost', 0))
+ sock.listen(50)
+ csock = eventlet_util.__original_socket__(socket.AF_INET,
+ socket.SOCK_STREAM)
+ csock.connect(('localhost', sock.getsockname()[1]))
+ nsock, addr = sock.accept()
+ self._event_notify_send = nsock.makefile('wb', 0)
+ gsock = greenio.GreenSocket(csock)
+ self._event_notify_recv = gsock.makefile('rb', 0)
+
+ def _init_events(self):
+ """Initializes the libvirt events subsystem.
+
+ This requires running a native thread to provide the
+ libvirt event loop integration. This forwards events
+ to a green thread which does the actual dispatching.
+ """
+
+ self._init_events_pipe()
+
+ LOG.debug("Starting native event thread")
+ event_thread = native_threading.Thread(target=self._native_thread)
+ event_thread.setDaemon(True)
+ event_thread.start()
+
+ LOG.debug("Starting green dispatch thread")
+ dispatch_thread = eventlet.spawn(self._dispatch_thread)
+
def init_host(self, host):
+ libvirt.virEventRegisterDefaultImpl()
+
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
@@ -355,6 +511,8 @@ class LibvirtDriver(driver.ComputeDriver):
'%(major)i.%(minor)i.%(micro)i or greater.') %
locals())
+ self._init_events()
+
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
LOG.debug(_('Connecting to libvirt: %s'), self.uri)
@@ -366,6 +524,17 @@ class LibvirtDriver(driver.ComputeDriver):
(libvirt.virDomain, libvirt.virConnect),
self._connect, self.uri, self.read_only)
+ try:
+ LOG.debug("Registering for lifecycle events %s" % str(self))
+ self._wrapped_conn.domainEventRegisterAny(
+ None,
+ libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
+ self._event_lifecycle_callback,
+ self)
+ except Exception, e:
+ LOG.warn(_("URI %s does not support events"),
+ self.uri)
+
return self._wrapped_conn
_conn = property(_get_connection)
@@ -686,13 +855,37 @@ class LibvirtDriver(driver.ComputeDriver):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
- LOG.warn(_('Could not determine iscsi initiator name'),
- instance=instance)
- return {
- 'ip': CONF.my_ip,
- 'initiator': self._initiator,
- 'host': CONF.host
- }
+ LOG.debug(_('Could not determine iscsi initiator name'),
+ instance=instance)
+
+ if not self._fc_wwnns:
+ self._fc_wwnns = libvirt_utils.get_fc_wwnns()
+ if not self._fc_wwnns or len(self._fc_wwnns) == 0:
+ LOG.debug(_('Could not determine fibre channel '
+ 'world wide node names'),
+ instance=instance)
+
+ if not self._fc_wwpns:
+ self._fc_wwpns = libvirt_utils.get_fc_wwpns()
+ if not self._fc_wwpns or len(self._fc_wwpns) == 0:
+ LOG.debug(_('Could not determine fibre channel '
+ 'world wide port names'),
+ instance=instance)
+
+ if not self._initiator and not self._fc_wwnns and not self._fc_wwpns:
+ msg = _("No Volume Connector found.")
+ LOG.error(msg)
+ raise exception.NovaException(msg)
+
+ connector = {'ip': CONF.my_ip,
+ 'initiator': self._initiator,
+ 'host': CONF.host}
+
+ if self._fc_wwnns and self._fc_wwpns:
+ connector["wwnns"] = self._fc_wwnns
+ connector["wwpns"] = self._fc_wwpns
+
+ return connector
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
@@ -809,6 +1002,50 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
disk_dev)
+ @exception.wrap_exception()
+ def attach_interface(self, instance, image_meta, network_info):
+ virt_dom = self._lookup_by_name(instance['name'])
+ for (network, mapping) in network_info:
+ self.vif_driver.plug(instance, (network, mapping))
+ self.firewall_driver.setup_basic_filtering(instance,
+ [(network, mapping)])
+ cfg = self.vif_driver.get_config(instance, network, mapping,
+ image_meta)
+ try:
+ flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+ state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
+ if state == power_state.RUNNING:
+ flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
+ virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
+ except libvirt.libvirtError as ex:
+ LOG.error(_('attaching network adapter failed.'),
+ instance=instance)
+ self.vif_driver.unplug(instance, (network, mapping))
+ raise exception.InterfaceAttachFailed(instance)
+
+ @exception.wrap_exception()
+ def detach_interface(self, instance, network_info):
+ virt_dom = self._lookup_by_name(instance['name'])
+ for (network, mapping) in network_info:
+ cfg = self.vif_driver.get_config(instance, network, mapping, None)
+ try:
+ self.vif_driver.unplug(instance, (network, mapping))
+ flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+ state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
+ if state == power_state.RUNNING:
+ flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
+ virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
+ except libvirt.libvirtError as ex:
+ error_code = ex.get_error_code()
+ if error_code == libvirt.VIR_ERR_NO_DOMAIN:
+ LOG.warn(_("During detach_interface, "
+ "instance disappeared."),
+ instance=instance)
+ else:
+ LOG.error(_('detaching network adapter failed.'),
+ instance=instance)
+ raise exception.InterfaceDetachFailed(instance)
+
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
@@ -1124,7 +1361,7 @@ class LibvirtDriver(driver.ComputeDriver):
def power_on(self, instance):
"""Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
- self._create_domain(domain=dom)
+ self._create_domain(domain=dom, instance=instance)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
@@ -1722,7 +1959,8 @@ class LibvirtDriver(driver.ComputeDriver):
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
- self.disk_cachemode)
+ self.disk_cachemode,
+ instance['extra_specs'])
def get_guest_storage_config(self, instance, image_meta,
disk_info,
@@ -1834,9 +2072,14 @@ class LibvirtDriver(driver.ComputeDriver):
guest.memory = inst_type['memory_mb'] * 1024
guest.vcpus = inst_type['vcpus']
+ quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota']
+ for key, value in instance['extra_specs'].iteritems():
+ if key in quota_items:
+ setattr(guest, key, value)
+
guest.cpu = self.get_guest_cpu_config()
- if 'root' in disk_mapping:
+ if 'root' in disk_mapping and disk_mapping['root']['dev'] is not None:
root_device_name = "/dev/" + disk_mapping['root']['dev']
else:
root_device_name = None
@@ -2019,8 +2262,7 @@ class LibvirtDriver(driver.ComputeDriver):
xml = conf.to_xml()
if write_to_disk:
- instance_dir = os.path.join(CONF.instances_path,
- instance["name"])
+ instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
@@ -2223,23 +2465,25 @@ class LibvirtDriver(driver.ComputeDriver):
return self._conn.getInfo()[1]
@staticmethod
- def get_local_gb_total():
- """Get the total hdd size(GB) of physical computer.
-
- :returns:
- The total amount of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
+ def get_local_gb_info():
+ """Get local storage info of the compute node in GB.
+ :returns: A dict containing:
+ :total: How big the overall usable filesystem is (in gigabytes)
+ :free: How much space is free (in gigabytes)
+ :used: How much space is used (in gigabytes)
"""
if CONF.libvirt_images_type == 'lvm':
- vg_total = libvirt_utils.volume_group_total_space(
+ info = libvirt_utils.get_volume_group_info(
CONF.libvirt_images_volume_group)
- return vg_total / (1024 ** 3)
else:
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['total'] / (1024 ** 3)
+ info = libvirt_utils.get_fs_info(CONF.instances_path)
+
+ for (k, v) in info.iteritems():
+ info[k] = v / (1024 ** 3)
+
+ return info
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
@@ -2305,24 +2549,6 @@ class LibvirtDriver(driver.ComputeDriver):
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
- def get_local_gb_used(self):
- """Get the free hdd size(GB) of physical computer.
-
- :returns:
- The total usage of HDD(GB).
- Note that this value shows a partition where
- NOVA-INST-DIR/instances mounts.
-
- """
-
- if CONF.libvirt_images_type == 'lvm':
- vg_used = libvirt_utils.volume_group_used_space(
- CONF.libvirt_images_volume_group)
- return vg_used / (1024 ** 3)
- else:
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['used'] / (1024 ** 3)
-
def get_hypervisor_type(self):
"""Get hypervisor type.
@@ -2494,17 +2720,35 @@ class LibvirtDriver(driver.ComputeDriver):
:param nodename: ignored in this driver
:returns: dictionary containing resource info
"""
+
+ def _get_disk_available_least():
+ """Return the real disk space available, accounting for overcommit.
+
+ The size of available disk, when block_migration command given
+ disk_over_commit param is FALSE.
+
+ That is, the free disk space minus the amount by which instance
+ virtual disks are over-committed (virtual size minus real size).
+
+ """
+ disk_free_gb = disk_info_dict['free']
+ disk_over_committed = self.get_disk_over_committed_size_total()
+ # Disk available least size
+ available_least = disk_free_gb * (1024 ** 3) - disk_over_committed
+ return (available_least / (1024 ** 3))
+
+ disk_info_dict = self.get_local_gb_info()
dic = {'vcpus': self.get_vcpu_total(),
'memory_mb': self.get_memory_mb_total(),
- 'local_gb': self.get_local_gb_total(),
+ 'local_gb': disk_info_dict['total'],
'vcpus_used': self.get_vcpu_used(),
'memory_mb_used': self.get_memory_mb_used(),
- 'local_gb_used': self.get_local_gb_used(),
+ 'local_gb_used': disk_info_dict['used'],
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': self.get_hypervisor_hostname(),
'cpu_info': self.get_cpu_info(),
- 'disk_available_least': self.get_disk_available_least()}
+ 'disk_available_least': _get_disk_available_least()}
return dic
def check_can_live_migrate_destination(self, ctxt, instance_ref,
@@ -3033,22 +3277,11 @@ class LibvirtDriver(driver.ComputeDriver):
'disk_size': dk_size})
return jsonutils.dumps(disk_info)
- def get_disk_available_least(self):
- """Return disk available least size.
-
- The size of available disk, when block_migration command given
- disk_over_commit param is FALSE.
-
- The size that deducted real nstance disk size from the total size
- of the virtual disk of all instances.
-
- """
- # available size of the disk
- dk_sz_gb = self.get_local_gb_total() - self.get_local_gb_used()
-
+ def get_disk_over_committed_size_total(self):
+ """Return total over committed disk size for all instances."""
# Disk size that all instance uses : virtual_size - disk_size
instances_name = self.list_instances()
- instances_sz = 0
+ disk_over_committed_size = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
@@ -3056,7 +3289,7 @@ class LibvirtDriver(driver.ComputeDriver):
for info in disk_infos:
i_vt_sz = int(info['virt_disk_size'])
i_dk_sz = int(info['disk_size'])
- instances_sz += i_vt_sz - i_dk_sz
+ disk_over_committed_size += i_vt_sz - i_dk_sz
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
@@ -3068,9 +3301,7 @@ class LibvirtDriver(driver.ComputeDriver):
pass
# NOTE(gtt116): give change to do other task.
greenthread.sleep(0)
- # Disk available least size
- available_least_size = dk_sz_gb * (1024 ** 3) - instances_sz
- return (available_least_size / 1024 / 1024 / 1024)
+ return disk_over_committed_size
def unfilter_instance(self, instance_ref, network_info):
"""See comments of same method in firewall_driver."""
@@ -3346,6 +3577,9 @@ class LibvirtDriver(driver.ComputeDriver):
% locals())
return os.access(instance_path, os.W_OK)
+ def inject_network_info(self, instance, nw_info):
+ self.firewall_driver.setup_basic_filtering(instance, nw_info)
+
class HostState(object):
"""Manages information about the compute node through libvirt."""
@@ -3370,9 +3604,10 @@ class HostState(object):
data["vcpus"] = self.driver.get_vcpu_total()
data["vcpus_used"] = self.driver.get_vcpu_used()
data["cpu_info"] = jsonutils.loads(self.driver.get_cpu_info())
- data["disk_total"] = self.driver.get_local_gb_total()
- data["disk_used"] = self.driver.get_local_gb_used()
- data["disk_available"] = data["disk_total"] - data["disk_used"]
+ disk_info_dict = self.driver.get_local_gb_info()
+ data["disk_total"] = disk_info_dict['total']
+ data["disk_used"] = disk_info_dict['used']
+ data["disk_available"] = disk_info_dict['free']
data["host_memory_total"] = self.driver.get_memory_mb_total()
data["host_memory_free"] = (data["host_memory_total"] -
self.driver.get_memory_mb_used())
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index c9a1b1fbb..90155baf3 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -17,13 +17,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-
from eventlet import tpool
+from oslo.config import cfg
from nova.cloudpipe import pipelib
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
+from nova.virt import netutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -124,10 +124,48 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
base_filter = self.get_base_filter_list(instance, allow_dhcp)
for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- self._define_filter(self._filter_container(instance_filter_name,
- base_filter))
+ self._define_filter(self._get_instance_filter_xml(instance,
+ base_filter,
+ network,
+ mapping))
+
+ def _get_instance_filter_parameters(self, network, mapping):
+ parameters = []
+
+ def format_parameter(parameter, value):
+ return ("<parameter name='%s' value='%s'/>" % (parameter, value))
+
+ for address in mapping['ips']:
+ parameters.append(format_parameter('IP', address['ip']))
+ if mapping['dhcp_server']:
+ parameters.append(format_parameter('DHCPSERVER',
+ mapping['dhcp_server']))
+ if CONF.use_ipv6:
+ ra_server = mapping.get('gateway_v6') + "/128"
+ parameters.append(format_parameter('RASERVER', ra_server))
+ if CONF.allow_same_net_traffic:
+ ipv4_cidr = network['cidr']
+ net, mask = netutils.get_net_and_mask(ipv4_cidr)
+ parameters.append(format_parameter('PROJNET', net))
+ parameters.append(format_parameter('PROJMASK', mask))
+ if CONF.use_ipv6:
+ ipv6_cidr = network['cidr_v6']
+ net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
+ parameters.append(format_parameter('PROJNET6', net))
+ parameters.append(format_parameter('PROJMASK6', prefix))
+ return parameters
+
+ def _get_instance_filter_xml(self, instance, filters, network, mapping):
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ parameters = self._get_instance_filter_parameters(network, mapping)
+ xml = '''<filter name='%s' chain='root'>''' % instance_filter_name
+ for f in filters:
+ xml += '''<filterref filter='%s'>''' % f
+ xml += ''.join(parameters)
+ xml += '</filterref>'
+ xml += '</filter>'
+ return xml
def get_base_filter_list(self, instance, allow_dhcp):
"""
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 0a84b22e5..082dd403e 100755
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -19,10 +19,12 @@ import abc
import contextlib
import os
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
from nova.openstack.common import lockutils
+from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
@@ -44,7 +46,7 @@ __imagebackend_opts = [
help='Create sparse logical volumes (with virtualsize)'
' if this flag is set to True.'),
cfg.IntOpt('libvirt_lvm_snapshot_size',
- default='1000',
+ default=1000,
help='The amount of storage (in megabytes) to allocate for LVM'
' snapshot copy-on-write blocks.'),
]
@@ -52,6 +54,9 @@ __imagebackend_opts = [
CONF = cfg.CONF
CONF.register_opts(__imagebackend_opts)
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
+CONF.import_opt('preallocate_images', 'nova.virt.driver')
+
+LOG = logging.getLogger(__name__)
class Image(object):
@@ -67,6 +72,7 @@ class Image(object):
self.source_type = source_type
self.driver_format = driver_format
self.is_block_dev = is_block_dev
+ self.preallocate = False
# NOTE(mikal): We need a lock directory which is shared along with
# instance files, to cover the scenario where multiple compute nodes
@@ -86,13 +92,15 @@ class Image(object):
"""
pass
- def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode):
+ def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
+ extra_specs):
"""Get `LibvirtConfigGuestDisk` filled for this image.
:disk_dev: Disk bus device name
:disk_bus: Disk bus type
:device_type: Device type for this image.
:cache_mode: Caching mode for this image
+ :extra_specs: Instance type extra specs dict.
"""
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
@@ -104,6 +112,16 @@ class Image(object):
driver_name = libvirt_utils.pick_disk_driver_name(self.is_block_dev)
info.driver_name = driver_name
info.source_path = self.path
+
+ tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec',
+ 'disk_write_bytes_sec', 'disk_write_iops_sec',
+ 'disk_total_bytes_sec', 'disk_total_iops_sec']
+ # Note(yaguang): Currently, the only tuning available is Block I/O
+ # throttling for qemu.
+ if self.source_type in ['file', 'block']:
+ for key, value in extra_specs.iteritems():
+ if key in tune_items:
+ setattr(info, key, value)
return info
def cache(self, fetch_func, filename, size=None, *args, **kwargs):
@@ -133,6 +151,25 @@ class Image(object):
self.create_image(call_if_not_exists, base, size,
*args, **kwargs)
+ if size and self.preallocate and self._can_fallocate():
+ utils.execute('fallocate', '-n', '-l', size, self.path)
+
+ def _can_fallocate(self):
+ """Check once per class whether fallocate(1) is available,
+ and that the instances directory supports fallocate(2).
+ """
+ can_fallocate = getattr(self.__class__, 'can_fallocate', None)
+ if can_fallocate is None:
+ _out, err = utils.trycmd('fallocate', '-n', '-l', '1',
+ self.path + '.fallocate_test')
+ utils.delete_if_exists(self.path + '.fallocate_test')
+ can_fallocate = not err
+ self.__class__.can_fallocate = can_fallocate
+ if not can_fallocate:
+ LOG.error('Unable to preallocate_images=%s at path: %s' %
+ (CONF.preallocate_images, self.path))
+ return can_fallocate
+
def snapshot_create(self):
raise NotImplementedError
@@ -152,6 +189,7 @@ class Raw(Image):
os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
self.snapshot_name = snapshot_name
+ self.preallocate = CONF.preallocate_images != 'none'
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -190,11 +228,15 @@ class Qcow2(Image):
os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
self.snapshot_name = snapshot_name
+ self.preallocate = CONF.preallocate_images != 'none'
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
lock_path=self.lock_path)
def copy_qcow2_image(base, target, size):
+ # TODO(pbrady): Consider copying the cow image here
+ # with preallocation=metadata set for performance reasons.
+ # This would be keyed on a 'preallocate_images' setting.
libvirt_utils.create_cow_image(base, target)
if size:
disk.extend(target, size)
@@ -241,13 +283,19 @@ class Lvm(Image):
self.escape(disk_name))
self.path = os.path.join('/dev', self.vg, self.lv)
+ # TODO(pbrady): possibly deprecate libvirt_sparse_logical_volumes
+ # for the more general preallocate_images
self.sparse = CONF.libvirt_sparse_logical_volumes
+ self.preallocate = not self.sparse
if snapshot_name:
self.snapshot_name = snapshot_name
self.snapshot_path = os.path.join('/dev', self.vg,
self.snapshot_name)
+ def _can_fallocate(self):
+ return False
+
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
lock_path=self.lock_path)
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index ea7bded95..d66d61415 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -28,9 +28,10 @@ import os
import re
import time
+from oslo.config import cfg
+
from nova.compute import task_states
from nova.compute import vm_states
-from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
@@ -38,7 +39,6 @@ from nova.openstack.common import log as logging
from nova import utils
from nova.virt.libvirt import utils as virtutils
-
LOG = logging.getLogger(__name__)
imagecache_opts = [
@@ -220,7 +220,7 @@ class ImageCacheManager(object):
self.used_images = {}
self.image_popularity = {}
- self.instance_names = {}
+ self.instance_names = set()
self.active_base_files = []
self.corrupt_base_files = []
@@ -263,7 +263,11 @@ class ImageCacheManager(object):
self.instance_names = set()
for instance in all_instances:
+ # NOTE(mikal): "instance name" here means "the name of a directory
+ # which might contain an instance" and therefore needs to include
+ # historical permutations as well as the current one.
self.instance_names.add(instance['name'])
+ self.instance_names.add(instance['uuid'])
resize_states = [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING,
@@ -272,6 +276,7 @@ class ImageCacheManager(object):
if instance['task_state'] in resize_states or \
instance['vm_state'] == vm_states.RESIZED:
self.instance_names.add(instance['name'] + '_resize')
+ self.instance_names.add(instance['uuid'] + '_resize')
image_ref_str = str(instance['image_ref'])
local, remote, insts = self.used_images.get(image_ref_str,
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index b8e0cafec..cf3fd9d26 100755
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -6,6 +6,7 @@
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack LLC
+# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -22,9 +23,9 @@
import os
from lxml import etree
+from oslo.config import cfg
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images
@@ -56,6 +57,93 @@ def get_iscsi_initiator():
return l[l.index('=') + 1:].strip()
+def get_fc_hbas():
+ """Get the Fibre Channel HBA information."""
+ try:
+ out, err = execute('systool', '-c', 'fc_host', '-v',
+ run_as_root=True)
+ except exception.ProcessExecutionError as exc:
+ if exc.exit_code == 96:
+ LOG.warn(_("systool is not installed"))
+ return []
+
+ if out is None:
+ raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))
+
+ lines = out.split('\n')
+ # ignore the first 2 lines
+ lines = lines[2:]
+ hbas = []
+ hba = {}
+ lastline = None
+ for line in lines:
+ line = line.strip()
+ # 2 consecutive newlines denote a new HBA port
+ if line == '' and lastline == '':
+ if len(hba) > 0:
+ hbas.append(hba)
+ hba = {}
+ else:
+ val = line.split('=')
+ if len(val) == 2:
+ key = val[0].strip().replace(" ", "")
+ value = val[1].strip()
+ hba[key] = value.replace('"', '')
+ lastline = line
+
+ return hbas
+
+
+def get_fc_hbas_info():
+ """Get Fibre Channel WWNs and device paths from the system, if any."""
+ # Note modern linux kernels contain the FC HBA's in /sys
+ # and are obtainable via the systool app
+ hbas = get_fc_hbas()
+ hbas_info = []
+ for hba in hbas:
+ wwpn = hba['port_name'].replace('0x', '')
+ wwnn = hba['node_name'].replace('0x', '')
+ device_path = hba['ClassDevicepath']
+ device = hba['ClassDevice']
+ hbas_info.append({'port_name': wwpn,
+ 'node_name': wwnn,
+ 'host_device': device,
+ 'device_path': device_path})
+ return hbas_info
+
+
+def get_fc_wwpns():
+ """Get Fibre Channel WWPNs from the system, if any."""
+ # Note modern linux kernels contain the FC HBA's in /sys
+ # and are obtainable via the systool app
+ hbas = get_fc_hbas()
+
+ wwpns = []
+ if hbas:
+ for hba in hbas:
+ if hba['port_state'] == 'Online':
+ wwpn = hba['port_name'].replace('0x', '')
+ wwpns.append(wwpn)
+
+ return wwpns
+
+
+def get_fc_wwnns():
+ """Get Fibre Channel WWNNs from the system, if any."""
+ # Note modern linux kernels contain the FC HBA's in /sys
+ # and are obtainable via the systool app
+ hbas = get_fc_hbas()
+
+ wwnns = []
+ if hbas:
+ for hba in hbas:
+ if hba['port_state'] == 'Online':
+ wwnn = hba['node_name'].replace('0x', '')
+ wwnns.append(wwnn)
+
+ return wwnns
+
+
def create_image(disk_format, path, size):
"""Create a disk image
@@ -117,7 +205,8 @@ def create_lvm_image(vg, lv, size, sparse=False):
:size: size of image in bytes
:sparse: create sparse logical volume
"""
- free_space = volume_group_free_space(vg)
+ vg_info = get_volume_group_info(vg)
+ free_space = vg_info['free']
def check_size(vg, lv, size):
if size > free_space:
@@ -144,33 +233,14 @@ def create_lvm_image(vg, lv, size, sparse=False):
execute(*cmd, run_as_root=True, attempts=3)
-def volume_group_free_space(vg):
- """Return available space on volume group in bytes.
-
- :param vg: volume group name
- """
- out, err = execute('vgs', '--noheadings', '--nosuffix',
- '--units', 'b', '-o', 'vg_free', vg,
- run_as_root=True)
- return int(out.strip())
-
-
-def volume_group_total_space(vg):
- """Return total space on volume group in bytes.
-
- :param vg: volume group name
- """
-
- out, err = execute('vgs', '--noheadings', '--nosuffix',
- '--units', 'b', '-o', 'vg_size', vg,
- run_as_root=True)
- return int(out.strip())
-
-
-def volume_group_used_space(vg):
- """Return available space on volume group in bytes.
+def get_volume_group_info(vg):
+ """Return free/used/total space info for a volume group in bytes.
:param vg: volume group name
+ :returns: A dict containing:
+ :total: How big the filesystem is (in bytes)
+ :free: How much space is free (in bytes)
+ :used: How much space is used (in bytes)
"""
out, err = execute('vgs', '--noheadings', '--nosuffix',
@@ -182,7 +252,9 @@ def volume_group_used_space(vg):
if len(info) != 2:
raise RuntimeError(_("vg %s must be LVM volume group") % vg)
- return int(info[0]) - int(info[1])
+ return {'total': int(info[0]),
+ 'free': int(info[1]),
+ 'used': int(info[0]) - int(info[1])}
def list_logical_volumes(vg):
@@ -518,17 +590,19 @@ def fetch_image(context, target, image_id, user_id, project_id):
images.fetch_to_raw(context, image_id, target, user_id, project_id)
-def get_instance_path(instance):
+def get_instance_path(instance, forceold=False):
"""Determine the correct path for instance storage.
- This used to be calculated all over the place. This method centralizes
- this into one location, which will make it easier to change the
- algorithm used to name instance storage directories.
+ This method determines the directory name for instance storage, while
+ handling the fact that we changed the naming style to something more
+ unique in the grizzly release.
:param instance: the instance we want a path for
+ :param forceold: force the use of the pre-grizzly format
:returns: a path to store information about that instance
"""
- # TODO(mikal): we should use UUID instead of name, as name isn't
- # nessesarily unique
- return os.path.join(CONF.instances_path, instance['name'])
+ pre_grizzly_name = os.path.join(CONF.instances_path, instance['name'])
+ if forceold or os.path.exists(pre_grizzly_name):
+ return pre_grizzly_name
+ return os.path.join(CONF.instances_path, instance['uuid'])
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index ee4f7e194..523857e42 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -21,13 +21,13 @@
import copy
+from oslo.config import cfg
+
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
-
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
@@ -172,22 +172,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
mac_id = mapping['mac'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
- primary_addr = mapping['ips'][0]['ip']
- dhcp_server = ra_server = ipv4_cidr = ipv6_cidr = None
-
- if mapping['dhcp_server']:
- dhcp_server = mapping['dhcp_server']
- if CONF.use_ipv6:
- ra_server = mapping.get('gateway_v6') + "/128"
- if CONF.allow_same_net_traffic:
- ipv4_cidr = network['cidr']
- if CONF.use_ipv6:
- ipv6_cidr = network['cidr_v6']
-
- if self.get_firewall_required():
- designer.set_vif_host_backend_filter_config(
- conf, name, primary_addr, dhcp_server,
- ra_server, ipv4_cidr, ipv6_cidr)
+ conf.filtername = name
+ designer.set_vif_bandwidth_config(conf, instance)
return conf
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index c368f66f6..1af29fa56 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
+# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -20,12 +21,16 @@
import hashlib
import os
import time
+import urllib2
+import urlparse
+
+from oslo.config import cfg
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import paths
+from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as virtutils
@@ -50,13 +55,22 @@ volume_opts = [
default=None,
help='Mount options passed to the nfs client. See section '
'of the nfs man page for details'),
- cfg.StrOpt('num_aoe_discover_tries',
+ cfg.IntOpt('num_aoe_discover_tries',
default=3,
help='number of times to rediscover AoE target to find volume'),
cfg.StrOpt('glusterfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Dir where the glusterfs volume is mounted on the '
'compute node'),
+ cfg.BoolOpt('libvirt_iscsi_use_multipath',
+ default=False,
+ help='use multipath connection of the iSCSI volume'),
+ cfg.StrOpt('scality_sofs_config',
+ default=None,
+ help='Path or URL to Scality SOFS configuration file'),
+ cfg.StrOpt('scality_sofs_mount_point',
+ default='$state_path/scality',
+ help='Base dir where Scality SOFS shall be mounted'),
]
CONF = cfg.CONF
@@ -173,6 +187,9 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
'-v', property_value)
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
+ def _get_target_portals_from_iscsiadm_output(self, output):
+ return [line.split()[0] for line in output.splitlines()]
+
@lockutils.synchronized('connect_volume', 'nova-')
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
@@ -181,43 +198,35 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
disk_info)
iscsi_properties = connection_info['data']
- # NOTE(vish): If we are on the same host as nova volume, the
- # discovery makes the target so we don't need to
- # run --op new. Therefore, we check to see if the
- # target exists, and if we get 255 (Not Found), then
- # we run --op new. This will also happen if another
- # volume is using the same target.
- try:
- self._run_iscsiadm(iscsi_properties, ())
- except exception.ProcessExecutionError as exc:
- # iscsiadm returns 21 for "No records found" after version 2.0-871
- if exc.exit_code in [21, 255]:
- self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
- else:
- raise
-
- if iscsi_properties.get('auth_method'):
- self._iscsiadm_update(iscsi_properties,
- "node.session.auth.authmethod",
- iscsi_properties['auth_method'])
- self._iscsiadm_update(iscsi_properties,
- "node.session.auth.username",
- iscsi_properties['auth_username'])
- self._iscsiadm_update(iscsi_properties,
- "node.session.auth.password",
- iscsi_properties['auth_password'])
- # NOTE(vish): If we have another lun on the same target, we may
- # have a duplicate login
- self._run_iscsiadm(iscsi_properties, ("--login",),
- check_exit_code=[0, 255])
-
- self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
+ libvirt_iscsi_use_multipath = CONF.libvirt_iscsi_use_multipath
+
+ if libvirt_iscsi_use_multipath:
+ #multipath installed, discovering other targets if available
+ #multipath should be configured on the nova-compute node,
+ #in order to fit storage vendor
+ out = self._run_iscsiadm_bare(['-m',
+ 'discovery',
+ '-t',
+ 'sendtargets',
+ '-p',
+ iscsi_properties['target_portal']],
+ check_exit_code=[0, 255])[0] \
+ or ""
+
+ for ip in self._get_target_portals_from_iscsiadm_output(out):
+ props = iscsi_properties.copy()
+ props['target_portal'] = ip
+ self._connect_to_iscsi_portal(props)
+
+ self._rescan_iscsi()
+ else:
+ self._connect_to_iscsi_portal(iscsi_properties)
host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
- (iscsi_properties['target_portal'],
- iscsi_properties['target_iqn'],
- iscsi_properties.get('target_lun', 0)))
+ (iscsi_properties['target_portal'],
+ iscsi_properties['target_iqn'],
+ iscsi_properties.get('target_lun', 0)))
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
@@ -244,6 +253,13 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"(after %(tries)s rescans)") %
locals())
+ if libvirt_iscsi_use_multipath:
+ #we use the multipath device instead of the single path device
+ self._rescan_multipath()
+ multipath_device = self._get_multipath_device_name(host_device)
+ if multipath_device is not None:
+ host_device = multipath_device
+
conf.source_type = "block"
conf.source_path = host_device
return conf
@@ -254,6 +270,30 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
iscsi_properties = connection_info['data']
+
+ if CONF.libvirt_iscsi_use_multipath and \
+ "mapper" in connection_info['data']['device_path']:
+ self._rescan_iscsi()
+ self._rescan_multipath()
+ devices = [dev for dev in self.connection.get_all_block_devices()
+ if "/mapper/" in dev]
+ if not devices:
+ #disconnect if no other multipath devices
+ self._disconnect_mpath(iscsi_properties)
+ return
+
+ other_iqns = [self._get_multipath_iqn(device)
+ for device in devices]
+
+ if iscsi_properties['target_iqn'] not in other_iqns:
+ #disconnect if no other multipath devices with same iqn
+ self._disconnect_mpath(iscsi_properties)
+ return
+
+ #else do not disconnect iscsi portals,
+ #as they are used for other luns
+ return
+
# NOTE(vish): Only disconnect from the target if no luns from the
# target are in use.
device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
@@ -262,12 +302,144 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
devices = self.connection.get_all_block_devices()
devices = [dev for dev in devices if dev.startswith(device_prefix)]
if not devices:
- self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
- check_exit_code=[0, 21, 255])
- self._run_iscsiadm(iscsi_properties, ("--logout",),
- check_exit_code=[0, 21, 255])
- self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
- check_exit_code=[0, 21, 255])
+ self._disconnect_from_iscsi_portal(iscsi_properties)
+
+ def _connect_to_iscsi_portal(self, iscsi_properties):
+ # NOTE(vish): If we are on the same host as nova volume, the
+ # discovery makes the target so we don't need to
+ # run --op new. Therefore, we check to see if the
+ # target exists, and if we get 255 (Not Found), then
+ # we run --op new. This will also happen if another
+ # volume is using the same target.
+ try:
+ self._run_iscsiadm(iscsi_properties, ())
+ except exception.ProcessExecutionError as exc:
+ # iscsiadm returns 21 for "No records found" after version 2.0-871
+ if exc.exit_code in [21, 255]:
+ self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
+ else:
+ raise
+
+ if iscsi_properties.get('auth_method'):
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.authmethod",
+ iscsi_properties['auth_method'])
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.username",
+ iscsi_properties['auth_username'])
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.password",
+ iscsi_properties['auth_password'])
+
+ #duplicate logins crash iscsiadm after load,
+ #so we scan active sessions to see if the node is logged in.
+ out = self._run_iscsiadm_bare(["-m", "session"],
+ run_as_root=True,
+ check_exit_code=[0, 1, 21])[0] or ""
+
+ portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
+ for p in out.splitlines() if p.startswith("tcp:")]
+
+ stripped_portal = iscsi_properties['target_portal'].split(",")[0]
+ if len(portals) == 0 or len([s for s in portals
+ if stripped_portal ==
+ s['portal'].split(",")[0]
+ and
+ s['iqn'] ==
+ iscsi_properties['target_iqn']]
+ ) == 0:
+ try:
+ self._run_iscsiadm(iscsi_properties,
+ ("--login",),
+ check_exit_code=[0, 255])
+ except exception.ProcessExecutionError as err:
+ #as this might be one of many paths,
+ #only set successful logins to startup automatically
+ if err.exit_code in [15]:
+ self._iscsiadm_update(iscsi_properties,
+ "node.startup",
+ "automatic")
+ return
+
+ self._iscsiadm_update(iscsi_properties,
+ "node.startup",
+ "automatic")
+
+ def _disconnect_from_iscsi_portal(self, iscsi_properties):
+ self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
+ check_exit_code=[0, 21, 255])
+ self._run_iscsiadm(iscsi_properties, ("--logout",),
+ check_exit_code=[0, 21, 255])
+ self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
+ check_exit_code=[0, 21, 255])
+
+ def _get_multipath_device_name(self, single_path_device):
+ device = os.path.realpath(single_path_device)
+ out = self._run_multipath(['-ll',
+ device],
+ check_exit_code=[0, 1])[0]
+ mpath_line = [line for line in out.splitlines()
+ if "scsi_id" not in line] # ignore udev errors
+ if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
+ return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
+
+ return None
+
+ def _get_iscsi_devices(self):
+ try:
+ devices = list(os.walk('/dev/disk/by-path'))[0][-1]
+ except IndexError:
+ return []
+ return [entry for entry in devices if entry.startswith("ip-")]
+
+ def _disconnect_mpath(self, iscsi_properties):
+ entries = self._get_iscsi_devices()
+ ips = [ip.split("-")[1] for ip in entries
+ if iscsi_properties['target_iqn'] in ip]
+ for ip in ips:
+ props = iscsi_properties.copy()
+ props['target_portal'] = ip
+ self._disconnect_from_iscsi_portal(props)
+
+ self._rescan_multipath()
+
+ def _get_multipath_iqn(self, multipath_device):
+ entries = self._get_iscsi_devices()
+ for entry in entries:
+ entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
+ entry_multipath = self._get_multipath_device_name(entry_real_path)
+ if entry_multipath == multipath_device:
+ return entry.split("iscsi-")[1].split("-lun")[0]
+ return None
+
+ def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
+ check_exit_code = kwargs.pop('check_exit_code', 0)
+ (out, err) = utils.execute('iscsiadm',
+ *iscsi_command,
+ run_as_root=True,
+ check_exit_code=check_exit_code)
+ LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
+ (iscsi_command, out, err))
+ return (out, err)
+
+ def _run_multipath(self, multipath_command, **kwargs):
+ check_exit_code = kwargs.pop('check_exit_code', 0)
+ (out, err) = utils.execute('multipath',
+ *multipath_command,
+ run_as_root=True,
+ check_exit_code=check_exit_code)
+ LOG.debug("multipath %s: stdout=%s stderr=%s" %
+ (multipath_command, out, err))
+ return (out, err)
+
+ def _rescan_iscsi(self):
+ self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
+ check_exit_code=[0, 1, 21, 255])
+ self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
+ check_exit_code=[0, 1, 21, 255])
+
+ def _rescan_multipath(self):
+ self._run_multipath('-r', check_exit_code=[0, 1, 21])
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
@@ -450,3 +622,212 @@ class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
return utils.execute('stat', path, run_as_root=True)
except exception.ProcessExecutionError:
return False
+
+
+class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
+ """Driver to attach Fibre Channel Network volumes to libvirt."""
+
+ def __init__(self, connection):
+ super(LibvirtFibreChannelVolumeDriver,
+ self).__init__(connection, is_block_dev=False)
+
+ def _get_pci_num(self, hba):
+ # NOTE(walter-boring)
+ # device path is in format of
+ # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
+ # sometimes an extra entry exists before the host2 value
+ # we always want the value prior to the host2 value
+ pci_num = None
+ if hba is not None:
+ if "device_path" in hba:
+ index = 0
+ device_path = hba['device_path'].split('/')
+ for value in device_path:
+ if value.startswith('host'):
+ break
+ index = index + 1
+
+ if index > 0:
+ pci_num = device_path[index - 1]
+
+ return pci_num
+
+ @lockutils.synchronized('connect_volume', 'nova-')
+ def connect_volume(self, connection_info, disk_info):
+ """Attach the volume to instance_name."""
+ fc_properties = connection_info['data']
+ mount_device = disk_info["dev"]
+
+ ports = fc_properties['target_wwn']
+ wwns = []
+ # we support a list of wwns or a single wwn
+ if isinstance(ports, list):
+ for wwn in ports:
+ wwns.append(wwn)
+ elif isinstance(ports, str):
+ wwns.append(ports)
+
+ # We need to look for wwns on every hba
+ # because we don't know ahead of time
+ # where they will show up.
+ hbas = virtutils.get_fc_hbas_info()
+ host_devices = []
+ for hba in hbas:
+ pci_num = self._get_pci_num(hba)
+ if pci_num is not None:
+ for wwn in wwns:
+ target_wwn = "0x%s" % wwn.lower()
+ host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
+ (pci_num,
+ target_wwn,
+ fc_properties.get('target_lun', 0)))
+ host_devices.append(host_device)
+
+ if len(host_devices) == 0:
+ # this is empty because we don't have any FC HBAs
+ msg = _("We are unable to locate any Fibre Channel devices")
+ raise exception.NovaException(msg)
+
+ # The /dev/disk/by-path/... node is not always present immediately
+ # We only need to find the first device. Once we see the first device
+ # multipath will have any others.
+ def _wait_for_device_discovery(host_devices, mount_device):
+ tries = self.tries
+ for device in host_devices:
+ LOG.debug(_("Looking for Fibre Channel dev %(device)s")
+ % locals())
+ if os.path.exists(device):
+ self.host_device = device
+ # get the /dev/sdX device. This is used
+ # to find the multipath device.
+ self.device_name = os.path.realpath(device)
+ raise utils.LoopingCallDone()
+
+ if self.tries >= CONF.num_iscsi_scan_tries:
+ msg = _("Fibre Channel device not found.")
+ raise exception.NovaException(msg)
+
+ LOG.warn(_("Fibre volume not yet found at: %(mount_device)s. "
+ "Will rescan & retry. Try number: %(tries)s") %
+ locals())
+
+ linuxscsi.rescan_hosts(hbas)
+ self.tries = self.tries + 1
+
+ self.host_device = None
+ self.device_name = None
+ self.tries = 0
+ timer = utils.FixedIntervalLoopingCall(_wait_for_device_discovery,
+ host_devices, mount_device)
+ timer.start(interval=2).wait()
+
+ tries = self.tries
+ if self.host_device is not None and self.device_name is not None:
+ LOG.debug(_("Found Fibre Channel volume %(mount_device)s "
+ "(after %(tries)s rescans)") % locals())
+
+ # see if the new drive is part of a multipath
+ # device. If so, we'll use the multipath device.
+ mdev_info = linuxscsi.find_multipath_device(self.device_name)
+ if mdev_info is not None:
+ LOG.debug(_("Multipath device discovered %(device)s")
+ % {'device': mdev_info['device']})
+ device_path = mdev_info['device']
+ connection_info['data']['devices'] = mdev_info['devices']
+ else:
+ # we didn't find a multipath device.
+ # so we assume the kernel only sees 1 device
+ device_path = self.host_device
+ device_info = linuxscsi.get_device_info(self.device_name)
+ connection_info['data']['devices'] = [device_info]
+
+ conf = super(LibvirtFibreChannelVolumeDriver,
+ self).connect_volume(connection_info, disk_info)
+
+ conf.source_type = "block"
+ conf.source_path = device_path
+ return conf
+
+ @lockutils.synchronized('connect_volume', 'nova-')
+ def disconnect_volume(self, connection_info, mount_device):
+ """Detach the volume from instance_name."""
+ super(LibvirtFibreChannelVolumeDriver,
+ self).disconnect_volume(connection_info, mount_device)
+ devices = connection_info['data']['devices']
+
+ # There may have been more than 1 device mounted
+ # by the kernel for this volume. We have to remove
+ # all of them
+ for device in devices:
+ linuxscsi.remove_device(device)
+
+
+class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
+ """Scality SOFS Nova driver. Provide hypervisors with access
+ to sparse files on SOFS. """
+
+ def __init__(self, connection):
+ """Create back-end to SOFS and check connection."""
+ super(LibvirtScalityVolumeDriver,
+ self).__init__(connection, is_block_dev=False)
+
+ def connect_volume(self, connection_info, disk_info):
+ """Connect the volume. Returns xml for libvirt."""
+ self._check_prerequisites()
+ self._mount_sofs()
+ conf = super(LibvirtScalityVolumeDriver,
+ self).connect_volume(connection_info, disk_info)
+ path = os.path.join(CONF.scality_sofs_mount_point,
+ connection_info['data']['sofs_path'])
+ conf.source_type = 'file'
+ conf.source_path = path
+
+ # The default driver cache policy is 'none', and this causes
+ # qemu/kvm to open the volume file with O_DIRECT, which is
+ # rejected by FUSE (on kernels older than 3.3). Scality SOFS
+ # is FUSE based, so we must provide a more sensible default.
+ conf.driver_cache = 'writethrough'
+
+ return conf
+
+ def _check_prerequisites(self):
+ """Sanity checks before attempting to mount SOFS."""
+
+ # config is mandatory
+ config = CONF.scality_sofs_config
+ if not config:
+ msg = _("Value required for 'scality_sofs_config'")
+ LOG.warn(msg)
+ raise exception.NovaException(msg)
+
+ # config can be a file path or a URL, check it
+ if urlparse.urlparse(config).scheme == '':
+ # turn local path into URL
+ config = 'file://%s' % config
+ try:
+ urllib2.urlopen(config, timeout=5).close()
+ except urllib2.URLError as e:
+ msg = _("Cannot access 'scality_sofs_config': %s") % e
+ LOG.warn(msg)
+ raise exception.NovaException(msg)
+
+ # mount.sofs must be installed
+ if not os.access('/sbin/mount.sofs', os.X_OK):
+ msg = _("Cannot execute /sbin/mount.sofs")
+ LOG.warn(msg)
+ raise exception.NovaException(msg)
+
+ def _mount_sofs(self):
+ config = CONF.scality_sofs_config
+ mount_path = CONF.scality_sofs_mount_point
+ sysdir = os.path.join(mount_path, 'sys')
+
+ if not os.path.isdir(mount_path):
+ utils.execute('mkdir', '-p', mount_path)
+ if not os.path.isdir(sysdir):
+ utils.execute('mount', '-t', 'sofs', config, mount_path,
+ run_as_root=True)
+ if not os.path.isdir(sysdir):
+ msg = _("Cannot mount Scality SOFS, check syslog for errors")
+ LOG.warn(msg)
+ raise exception.NovaException(msg)
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 70f1544c4..eec2d6467 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -23,7 +23,7 @@
import netaddr
-from nova.openstack.common import cfg
+from oslo.config import cfg
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
index dc539814e..58ad29099 100644
--- a/nova/virt/powervm/blockdev.py
+++ b/nova/virt/powervm/blockdev.py
@@ -18,8 +18,9 @@ import hashlib
import os
import re
+from oslo.config import cfg
+
from nova.image import glance
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
@@ -34,7 +35,74 @@ CONF = cfg.CONF
class PowerVMDiskAdapter(object):
- pass
+ """PowerVM disk adapter interface
+ Provides a contract to implement multiple ways to generate
+ and attach volumes to virtual machines using local and/or
+ external storage
+ """
+
+ def create_volume(self, size):
+ """Creates a volume with a minimum size
+
+ :param size: size of the volume in bytes
+ :returns: string -- the name of the disk device.
+ """
+ pass
+
+ def delete_volume(self, volume_info):
+ """Removes the disk and its associated vSCSI connection
+
+ :param volume_info: dictionary with volume info including name of
+ disk device in /dev/
+ """
+ pass
+
+ def create_volume_from_image(self, context, instance, image_id):
+ """Creates a Volume and copies the specified image to it
+
+ :param context: nova context used to retrieve image from glance
+ :param instance: instance to create the volume for
+ :param image_id: image_id reference used to locate image in glance
+ :returns: dictionary with the name of the created
+ disk device in 'device_name' key
+ """
+ pass
+
+ def create_image_from_volume(self, device_name, context,
+ image_id, image_meta):
+ """Capture the contents of a volume and upload to glance
+
+ :param device_name: device in /dev/ to capture
+ :param context: nova context for operation
+ :param image_id: image reference to pre-created image in glance
+ :param image_meta: metadata for new image
+ """
+ pass
+
+ def migrate_volume(self, lv_name, src_host, dest, image_path,
+ instance_name=None):
+ """Copy a logical volume to file, compress, and transfer
+
+ :param lv_name: volume device name
+ :param src_host: source IP or DNS name.
+ :param dest: destination IP or DNS name
+ :param image_path: path to remote image storage directory
+ :param instance_name: name of instance that is being migrated
+ :returns: file path on destination of image file that was moved
+ """
+ pass
+
+ def attach_volume_to_host(self, *args, **kargs):
+ """
+ Attaches volume to host using info passed in *args and **kargs
+ """
+ pass
+
+ def detach_volume_from_host(self, *args, **kargs):
+ """
+ Detaches volume from host using info passed in *args and **kargs
+ """
+ pass
class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
@@ -65,11 +133,13 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
"""
return self._create_logical_volume(size)
- def delete_volume(self, disk_name):
+ def delete_volume(self, volume_info):
"""Removes the Logical Volume and its associated vSCSI connection
- :param disk_name: name of Logical Volume device in /dev/
+ :param volume_info: Dictionary with volume info including name of
+ Logical Volume device in /dev/ via device_name key
"""
+ disk_name = volume_info["device_name"]
LOG.debug(_("Removing the logical volume '%s'") % disk_name)
self._remove_logical_volume(disk_name)
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 9cd6453ab..892ac34ef 100755
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -17,13 +17,13 @@
import socket
import time
+from oslo.config import cfg
+
from nova.image import glance
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.powervm import operator
-
LOG = logging.getLogger(__name__)
powervm_opts = [
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 43fa27160..87f596765 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -19,14 +19,13 @@ import random
import re
import time
-from nova import exception as nova_exception
-from nova import utils
+from oslo.config import cfg
from nova.compute import power_state
-from nova.openstack.common import cfg
+from nova import exception as nova_exception
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-
+from nova import utils
from nova.virt.powervm import blockdev
from nova.virt.powervm import command
from nova.virt.powervm import common
@@ -34,7 +33,6 @@ from nova.virt.powervm import constants
from nova.virt.powervm import exception
from nova.virt.powervm import lpar as LPAR
-
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -172,81 +170,6 @@ class PowerVMOperator(object):
self._host_stats = data
def spawn(self, context, instance, image_id, network_info):
- def _create_lpar_instance(instance):
- host_stats = self.get_host_stats(refresh=True)
- inst_name = instance['name']
-
- # CPU/Memory min and max can be configurable. Lets assume
- # some default values for now.
-
- # Memory
- mem = instance['memory_mb']
- if mem > host_stats['host_memory_free']:
- LOG.error(_('Not enough free memory in the host'))
- raise exception.PowerVMInsufficientFreeMemory(
- instance_name=instance['name'])
- mem_min = min(mem, constants.POWERVM_MIN_MEM)
- mem_max = mem + constants.POWERVM_MAX_MEM
-
- # CPU
- cpus = instance['vcpus']
- avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
- if cpus > avail_cpus:
- LOG.error(_('Insufficient available CPU on PowerVM'))
- raise exception.PowerVMInsufficientCPU(
- instance_name=instance['name'])
- cpus_min = min(cpus, constants.POWERVM_MIN_CPUS)
- cpus_max = cpus + constants.POWERVM_MAX_CPUS
- cpus_units_min = decimal.Decimal(cpus_min) / decimal.Decimal(10)
- cpus_units = decimal.Decimal(cpus) / decimal.Decimal(10)
-
- try:
- # Network
- # To ensure the MAC address on the guest matches the
- # generated value, pull the first 10 characters off the
- # MAC address for the mac_base_value parameter and then
- # get the integer value of the final 2 characters as the
- # slot_id parameter
- #
- # NOTE(mjfork) the slot_id should not exceed 255 (FF) to
- # to avoid spilling over into the next
- # highest octet. The contract with
- # macs_for_instance limits to a value between
- # 32 and 63 inclusive so we are safe.
- #
- # Further, with the contract on slot_id, we
- # can hard code max_virtual_slots to 64 in
- # LPAR definition.
- mac = network_info[0]['address']
- mac_base_value = (mac[:-2]).replace(':', '')
- eth_id = self._operator.get_virtual_eth_adapter_id()
- slot_id = int(mac[-2:], 16)
- virtual_eth_adapters = ('%(slot_id)s/0/%(eth_id)s//0/0' %
- locals())
-
- # LPAR configuration data
- lpar_inst = LPAR.LPAR(
- name=inst_name, lpar_env='aixlinux',
- min_mem=mem_min, desired_mem=mem,
- max_mem=mem_max, proc_mode='shared',
- sharing_mode='uncap', min_procs=cpus_min,
- desired_procs=cpus, max_procs=cpus_max,
- min_proc_units=cpus_units_min,
- desired_proc_units=cpus_units,
- max_proc_units=cpus_max,
- virtual_eth_mac_base_value=mac_base_value,
- max_virtual_slots=64,
- virtual_eth_adapters=virtual_eth_adapters)
-
- LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
- self._operator.create_lpar(lpar_inst)
- #TODO(mjfork) capture the error and handle the error when the MAC
- # prefix already exists on the system (1 in 2^32)
- except nova_exception.ProcessExecutionError:
- LOG.exception(_("LPAR instance '%s' creation failed") %
- instance['name'])
- raise exception.PowerVMLPARCreationFailed()
-
def _create_image(context, instance, image_id):
"""Fetch image from glance and copy it to the remote system."""
try:
@@ -268,7 +191,11 @@ class PowerVMOperator(object):
try:
try:
host_stats = self.get_host_stats(refresh=True)
- lpar_inst = self._create_lpar_instance(instance, host_stats)
+ lpar_inst = self._create_lpar_instance(instance,
+ network_info, host_stats)
+ #TODO(mjfork) capture the error and handle the error when the
+ # MAC prefix already exists on the
+ # system (1 in 2^28)
self._operator.create_lpar(lpar_inst)
LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
except nova_exception.ProcessExecutionError:
@@ -281,6 +208,9 @@ class PowerVMOperator(object):
% instance['name'])
self._operator.start_lpar(instance['name'])
+ # TODO(mrodden): probably do this a better way
+ # that actually relies on the time module
+ # and nonblocking threading
# Wait for boot
timeout_count = range(10)
while timeout_count:
@@ -360,14 +290,19 @@ class PowerVMOperator(object):
LOG.debug(_("Shutting down the instance '%s'") % instance_name)
self._operator.stop_lpar(instance_name)
+ #dperaza: LPAR should be deleted first so that vhost is
+ #cleanly removed and detached from disk device.
+ LOG.debug(_("Deleting the LPAR instance '%s'") % instance_name)
+ self._operator.remove_lpar(instance_name)
+
if disk_name and destroy_disks:
# TODO(mrodden): we should also detach from the instance
# before we start deleting things...
- self._disk_adapter.detach_volume_from_host(disk_name)
- self._disk_adapter.delete_volume(disk_name)
-
- LOG.debug(_("Deleting the LPAR instance '%s'") % instance_name)
- self._operator.remove_lpar(instance_name)
+ volume_info = {'device_name': disk_name}
+ #Volume info dictionary might need more info that is lost when
+ #volume is detached from host so that it can be deleted
+ self._disk_adapter.detach_volume_from_host(volume_info)
+ self._disk_adapter.delete_volume(volume_info)
except Exception:
LOG.exception(_("PowerVM instance cleanup failed"))
raise exception.PowerVMLPARInstanceCleanupFailed(
@@ -382,7 +317,7 @@ class PowerVMOperator(object):
def macs_for_instance(self, instance):
return self._operator.macs_for_instance(instance)
- def _create_lpar_instance(self, instance, host_stats=None):
+ def _create_lpar_instance(self, instance, network_info, host_stats=None):
inst_name = instance['name']
# CPU/Memory min and max can be configurable. Lets assume
@@ -411,9 +346,21 @@ class PowerVMOperator(object):
cpus_units = decimal.Decimal(cpus) / decimal.Decimal(10)
# Network
+ # To ensure the MAC address on the guest matches the
+ # generated value, pull the first 10 characters off the
+ # MAC address for the mac_base_value parameter and then
+ # get the integer value of the final 2 characters as the
+ # slot_id parameter
+ mac = network_info[0]['address']
+ mac_base_value = (mac[:-2]).replace(':', '')
eth_id = self._operator.get_virtual_eth_adapter_id()
+ slot_id = int(mac[-2:], 16)
+ virtual_eth_adapters = ('%(slot_id)s/0/%(eth_id)s//0/0' %
+ locals())
# LPAR configuration data
+ # max_virtual_slots is hardcoded to 64 since we generate a MAC
+ # address that must be placed in slots 32 - 64
lpar_inst = LPAR.LPAR(
name=inst_name, lpar_env='aixlinux',
min_mem=mem_min, desired_mem=mem,
@@ -423,7 +370,9 @@ class PowerVMOperator(object):
min_proc_units=cpus_units_min,
desired_proc_units=cpus_units,
max_proc_units=cpus_max,
- virtual_eth_adapters='4/0/%s//0/0' % eth_id)
+ virtual_eth_mac_base_value=mac_base_value,
+ max_virtual_slots=64,
+ virtual_eth_adapters=virtual_eth_adapters)
return lpar_inst
def _check_host_resources(self, instance, vcpus, mem, host_stats):
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 63d9e3c57..5e48f4cd3 100755
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -40,9 +40,9 @@ A connection to the VMware ESX platform.
import time
from eventlet import event
+from oslo.config import cfg
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/vmwareapi/network_util.py b/nova/virt/vmwareapi/network_util.py
index 5a83b0763..c353e66fd 100644
--- a/nova/virt/vmwareapi/network_util.py
+++ b/nova/virt/vmwareapi/network_util.py
@@ -113,7 +113,7 @@ def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
def check_if_vlan_interface_exists(session, vlan_interface, cluster=None):
- """Checks if the vlan_inteface exists on the esx host."""
+ """Checks if the vlan_interface exists on the esx host."""
host_mor = vm_util.get_host_ref(session, cluster)
physical_nics_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index e2dfa0427..1a28ca35e 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -17,8 +17,9 @@
"""VIF drivers for VMware."""
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import network_util
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index 83d120df5..f3a47315b 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -27,7 +27,8 @@ try:
except ImportError:
suds = None
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.virt.vmwareapi import error_util
RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 0aeb58ea3..b70d1583e 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -27,13 +27,14 @@ import urllib
import urllib2
import uuid
+from oslo.config import cfg
+
from nova import block_device
from nova.compute import api as compute
from nova.compute import power_state
from nova.compute import task_states
from nova import context as nova_context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.virt import driver
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
index 855106e4a..dd8f60f2a 100644
--- a/nova/virt/vmwareapi/volumeops.py
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -18,14 +18,14 @@
Management class for Storage-related functions (attach, detach, etc).
"""
+from oslo.config import cfg
+
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import volume_util
-
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 8220fb67b..0be26a241 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -21,10 +21,11 @@ import os
import time
import uuid
+from oslo.config import cfg
+
from nova.api.metadata import password
from nova import context
from nova import crypto
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 6a1cdd6b9..347d36255 100755
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -44,10 +44,10 @@ import xmlrpclib
from eventlet import queue
from eventlet import timeout
+from oslo.config import cfg
from nova import context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.xenapi import host
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index 045409e4f..6ddc76494 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -19,13 +19,12 @@
Management class for host-related functions (start, reboot, etc).
"""
-import logging
-
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
diff --git a/nova/virt/xenapi/imageupload/glance.py b/nova/virt/xenapi/imageupload/glance.py
index adc06f65b..5ea75db16 100644
--- a/nova/virt/xenapi/imageupload/glance.py
+++ b/nova/virt/xenapi/imageupload/glance.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo.config import cfg
+
from nova.image import glance
-from nova.openstack.common import cfg
import nova.openstack.common.log as logging
from nova.virt.xenapi import vm_utils
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 1682f18d1..ee39fb2da 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -21,9 +21,10 @@ Management class for Pool-related functions (join, eject, etc).
import urlparse
+from oslo.config import cfg
+
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.xenapi import pool_states
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 35cdb201d..f0b9eebbe 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -19,7 +19,8 @@
"""VIF drivers for XenAPI."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import log as logging
from nova.virt.xenapi import network_utils
from nova.virt.xenapi import vm_utils
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 3bfe6de09..28e83198f 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -33,6 +33,7 @@ from xml.dom import minidom
from xml.parsers import expat
from eventlet import greenthread
+from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import block_device
@@ -40,7 +41,6 @@ from nova.compute import power_state
from nova.compute import task_states
from nova import exception
from nova.image import glance
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 5dbadc416..bf3f631b3 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -25,6 +25,7 @@ import time
from eventlet import greenthread
import netaddr
+from oslo.config import cfg
from nova import block_device
from nova.compute import api as compute
@@ -34,7 +35,6 @@ from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 40451a48e..edf92cee9 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -23,9 +23,9 @@ and storage repositories
import re
import string
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
+from oslo.config import cfg
+from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py
index ffbd509fd..6a4c2eb4a 100644
--- a/nova/vnc/__init__.py
+++ b/nova/vnc/__init__.py
@@ -18,7 +18,7 @@
"""Module for VNC Proxying."""
-from nova.openstack.common import cfg
+from oslo.config import cfg
vnc_opts = [
diff --git a/nova/vnc/xvp_proxy.py b/nova/vnc/xvp_proxy.py
index 9489b949a..3efa23f8b 100644
--- a/nova/vnc/xvp_proxy.py
+++ b/nova/vnc/xvp_proxy.py
@@ -25,10 +25,10 @@ import eventlet
import eventlet.green
import eventlet.greenio
import eventlet.wsgi
+from oslo.config import cfg
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import version
from nova import wsgi
diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py
index da54a5009..1a1917307 100644
--- a/nova/volume/__init__.py
+++ b/nova/volume/__init__.py
@@ -16,23 +16,24 @@
# License for the specific language governing permissions and limitations
# under the License.
+import oslo.config.cfg
+
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.volume import <foo>' elsewhere.
-import nova.openstack.common.cfg
import nova.openstack.common.importutils
_volume_opts = [
- nova.openstack.common.cfg.StrOpt('volume_api_class',
+ oslo.config.cfg.StrOpt('volume_api_class',
default='nova.volume.cinder.API',
help='The full class name of the '
'volume API class to use'),
]
-nova.openstack.common.cfg.CONF.register_opts(_volume_opts)
+oslo.config.cfg.CONF.register_opts(_volume_opts)
def API():
importutils = nova.openstack.common.importutils
- volume_api_class = nova.openstack.common.cfg.CONF.volume_api_class
+ volume_api_class = oslo.config.cfg.CONF.volume_api_class
cls = importutils.import_class(volume_api_class)
return cls()
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index b58e63011..e465daf1c 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -26,10 +26,10 @@ import sys
from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
from cinderclient.v1 import client as cinder_client
+from oslo.config import cfg
from nova.db import base
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
cinder_opts = [
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 651dbc4f6..7b85d0a5b 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -26,6 +26,7 @@ import sys
import eventlet
import eventlet.wsgi
import greenlet
+from oslo.config import cfg
from paste import deploy
import routes.middleware
import ssl
@@ -33,7 +34,6 @@ import webob.dec
import webob.exc
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
wsgi_opts = [
diff --git a/openstack-common.conf b/openstack-common.conf
index fcff378b5..463abd1c2 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,cliutils,context,db,db.sqlalchemy,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils,install_venv_common,flakes,version
+modules=cliutils,context,db,db.api,db.sqlalchemy,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils,install_venv_common,flakes,version
# The base module to hold the copy of openstack.common
base=nova
diff --git a/setup.py b/setup.py
index 54d62e409..fd968eeb2 100644
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,7 @@ import setuptools
from nova.openstack.common import setup as common_setup
requires = common_setup.parse_requirements()
+depend_links = common_setup.parse_dependency_links()
project = 'nova'
setuptools.setup(
@@ -42,6 +43,7 @@ setuptools.setup(
cmdclass=common_setup.get_cmdclass(),
packages=setuptools.find_packages(exclude=['bin', 'smoketests']),
install_requires=requires,
+ dependency_links=depend_links,
include_package_data=True,
test_suite='nose.collector',
setup_requires=['setuptools_git>=0.4'],
diff --git a/tools/conf/analyze_opts.py b/tools/conf/analyze_opts.py
index f78169a8c..3088d6390 100755
--- a/tools/conf/analyze_opts.py
+++ b/tools/conf/analyze_opts.py
@@ -26,7 +26,7 @@ import os
import sys
sys.path.append(os.getcwd())
-from nova.openstack.common import iniparser
+from oslo.config import iniparser
class PropertyCollecter(iniparser.BaseParser):
diff --git a/tools/conf/extract_opts.py b/tools/conf/extract_opts.py
index 4dde53335..83d99dd19 100644
--- a/tools/conf/extract_opts.py
+++ b/tools/conf/extract_opts.py
@@ -26,7 +26,8 @@ import socket
import sys
import textwrap
-from nova.openstack.common import cfg
+from oslo.config import cfg
+
from nova.openstack.common import importutils
diff --git a/tools/hacking.py b/tools/hacking.py
index d5853d591..1279e87e9 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -29,7 +29,6 @@ import re
import subprocess
import sys
import tokenize
-import warnings
import pep8
@@ -47,8 +46,8 @@ logging.disable('LOG')
#N9xx other
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session',
- 'nova.openstack.common.log.logging',
'nova.db.sqlalchemy.migration.versioning_api']
+# Paste is missing a __init__ in top level directory
START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"]
END_DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
@@ -187,37 +186,35 @@ def nova_import_rules(logical_line):
# pass the doctest, since the relativity depends on the file's locality
def is_module_for_sure(mod, search_path=sys.path):
- mod_path = mod.replace('.', os.sep)
try:
- imp.find_module(mod_path, search_path)
+ mod_name = mod
+ while '.' in mod_name:
+ pack_name, _sep, mod_name = mod.partition('.')
+ f, p, d = imp.find_module(pack_name, search_path)
+ search_path = [p]
+ imp.find_module(mod_name, search_path)
except ImportError:
- return False
+ try:
+ # NOTE(vish): handle namespace modules
+ module = __import__(mod)
+ except ImportError, exc:
+ # NOTE(vish): the import error might be due
+ # to a missing dependency
+ missing = str(exc).split()[-1]
+ if missing != mod.split('.')[-1]:
+ _missingImport.add(missing)
+ return True
+ return False
return True
- def is_module_for_sure_cached(mod):
+ def is_module(mod):
+ """Checks for non module imports."""
if mod in modules_cache:
return modules_cache[mod]
res = is_module_for_sure(mod)
modules_cache[mod] = res
return res
- def is_module(mod):
- """Checks for non module imports.
-
- If can't find module on first try, recursively check for the parent
- modules.
- When parsing 'from x import y,' x is the parent.
- """
- if is_module_for_sure_cached(mod):
- return True
- parts = mod.split('.')
- for i in range(len(parts) - 1, 0, -1):
- path = '.'.join(parts[0:i])
- if is_module_for_sure_cached(path):
- return False
- _missingImport.add(mod)
- return True
-
current_path = os.path.dirname(pep8.current_file)
current_mod = os.path.basename(pep8.current_file)
if current_mod[-3:] == ".py":
@@ -258,7 +255,7 @@ def nova_import_rules(logical_line):
# The guestfs module now imports guestfs
mod = split_line[1]
if (current_mod != mod and
- not is_module_for_sure_cached(mod) and
+ not is_module(mod) and
is_module_for_sure(mod, [current_path])):
yield 0, ("N304: No relative imports."
" '%s' is a relative import"
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index 3d47b12b2..fd9076f0e 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -21,20 +21,12 @@ virtual environments.
Synced in from openstack-common
"""
+import argparse
import os
import subprocess
import sys
-possible_topdir = os.getcwd()
-if os.path.exists(os.path.join(possible_topdir, "nova",
- "__init__.py")):
- sys.path.insert(0, possible_topdir)
-
-
-from nova.openstack.common import cfg
-
-
class InstallVenv(object):
def __init__(self, root, venv, pip_requires, test_requires, py_version,
@@ -58,7 +50,7 @@ class InstallVenv(object):
check_exit_code=True):
"""Runs a command in an out-of-process shell.
- Returns the output of that command. Working directory is ROOT.
+ Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = subprocess.PIPE
@@ -101,7 +93,7 @@ class InstallVenv(object):
else:
self.run_command(['virtualenv', '-q', self.venv])
print 'done.'
- print 'Installing pip in virtualenv...',
+ print 'Installing pip in venv...',
if not self.run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']).strip():
self.die("Failed to install pip.")
@@ -139,17 +131,12 @@ class InstallVenv(object):
def parse_args(self, argv):
"""Parses command-line arguments."""
- cli_opts = [
- cfg.BoolOpt('no-site-packages',
- default=False,
- short='n',
- help="Do not inherit packages from global Python"
- "install"),
- ]
- CLI = cfg.ConfigOpts()
- CLI.register_cli_opts(cli_opts)
- CLI(argv[1:])
- return CLI
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-n', '--no-site-packages',
+ action='store_true',
+ help="Do not inherit packages from global Python "
+ "install")
+ return parser.parse_args(argv[1:])
class Distro(InstallVenv):
diff --git a/tools/pip-requires b/tools/pip-requires
index d7e48ff87..bb4eb7119 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -27,3 +27,4 @@ python-glanceclient>=0.5.0,<2
python-keystoneclient>=0.2.0
stevedore>=0.7
websockify
+http://tarballs.openstack.org/oslo-config/oslo-config-2013.1b4.tar.gz#egg=oslo-config
diff --git a/tools/run_pep8.sh b/tools/run_pep8.sh
index 4e7212e08..80c20a92d 100755
--- a/tools/run_pep8.sh
+++ b/tools/run_pep8.sh
@@ -1,4 +1,6 @@
#!/bin/bash
+
+set -e
# This is used by run_tests.sh and tox.ini
python tools/hacking.py --doctest
@@ -12,7 +14,7 @@ ${PEP8} ${EXCLUDE} .
${PEP8} --filename=nova* bin
-SCRIPT_ROOT=$(echo $(dirname $(readlink -f "$0")) | sed s/\\/tools//)
+SCRIPT_ROOT=$(echo $(cd "$(dirname $0)"; pwd) | sed s/\\/tools//)
SCRIPTS_PATH=${SCRIPT_ROOT}/plugins/xenserver/networking/etc/xensource/scripts
PYTHONPATH=${SCRIPTS_PATH} ${PEP8} ./plugins/xenserver/networking
@@ -20,6 +22,6 @@ PYTHONPATH=${SCRIPTS_PATH} ${PEP8} ./plugins/xenserver/networking
# NOTE(sirp): Also check Dom0 plugins w/o .py extension
PLUGINS_PATH=${SCRIPT_ROOT}/plugins/xenserver/xenapi/etc/xapi.d/plugins
PYTHONPATH=${PLUGINS_PATH} ${PEP8} ./plugins/xenserver/xenapi \
- `find plugins/xenserver/xenapi/etc/xapi.d/plugins/ -type f -perm +111`
+ `find plugins/xenserver/xenapi/etc/xapi.d/plugins -type f -perm +111`
! pyflakes nova/ | grep "imported but unused"
diff --git a/tools/test-requires b/tools/test-requires
index 49ee52809..1318ecd9a 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -9,6 +9,7 @@ mox==0.5.3
MySQL-python
psycopg2
pep8==1.3.3
+pyflakes
pylint==0.25.2
python-subunit
sphinx>=1.1.2
diff --git a/tools/xenserver/destroy_cached_images.py b/tools/xenserver/destroy_cached_images.py
index fa71c0a68..625c765c6 100644
--- a/tools/xenserver/destroy_cached_images.py
+++ b/tools/xenserver/destroy_cached_images.py
@@ -16,6 +16,8 @@ eventlet.monkey_patch()
import os
import sys
+from oslo.config import cfg
+
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -26,7 +28,6 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from nova import config
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.xenapi import driver as xenapi_driver
diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py
index 27b89d510..a842282fa 100755
--- a/tools/xenserver/vm_vdi_cleaner.py
+++ b/tools/xenserver/vm_vdi_cleaner.py
@@ -19,6 +19,8 @@
import doctest
import os
import sys
+
+from oslo.config import cfg
import XenAPI
@@ -30,7 +32,6 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova.virt import virtapi
from nova.virt.xenapi import driver as xenapi_driver
@@ -40,8 +41,14 @@ cleaner_opts = [
default=172800,
help='Number of seconds zombie instances are cleaned up.'),
]
+
+cli_opt = cfg.StrOpt('command',
+ default=None,
+ help='Cleaner command')
+
CONF = cfg.CONF
CONF.register_opts(cleaner_opts)
+CONF.register_cli_opt(cli_opt)
CONF.import_opt('verbose', 'nova.openstack.common.log')
CONF.import_opt("resize_confirm_window", "nova.compute.manager")
@@ -279,14 +286,14 @@ def clean_orphaned_instances(xenapi, orphaned_instances):
def main():
"""Main loop."""
- args = CONF(args=sys.argv,
- usage='%prog [options] [' + '|'.join(ALLOWED_COMMANDS) + ']')
- if len(args) < 2:
+ args = CONF(args=sys.argv[1:], usage='%(prog)s [options] --command={' +
+ '|'.join(ALLOWED_COMMANDS) + '}')
+
+ command = CONF.command
+ if not command or command not in ALLOWED_COMMANDS:
CONF.print_usage()
sys.exit(1)
- command = args[1]
-
if CONF.zombie_instance_updated_at_window < CONF.resize_confirm_window:
raise Exception("`zombie_instance_updated_at_window` has to be longer"
" than `resize_confirm_window`.")
diff --git a/tox.ini b/tox.ini
index a34315a7f..b4b5ccc20 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,9 +15,6 @@ sitepackages = True
downloadcache = ~/cache/pip
[testenv:pep8]
-deps=
- pep8==1.3.3
- pyflakes
commands = bash tools/run_pep8.sh
[testenv:pylint]