summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.coveragerc2
-rw-r--r--HACKING.rst25
-rwxr-xr-xbin/nova-baremetal-manage11
-rwxr-xr-xbin/nova-clear-rabbit-queues4
-rwxr-xr-xbin/nova-compute2
-rwxr-xr-xbin/nova-dhcpbridge2
-rwxr-xr-xbin/nova-manage17
-rwxr-xr-xbin/nova-rootwrap17
-rwxr-xr-xbin/nova-xvpvncproxy1
-rw-r--r--doc/api_samples/OS-EXT-AZ/server-get-resp.json5
-rw-r--r--doc/api_samples/OS-EXT-AZ/server-get-resp.xml4
-rw-r--r--doc/api_samples/OS-EXT-AZ/servers-detail-resp.json5
-rw-r--r--doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml4
-rw-r--r--doc/api_samples/OS-EXT-IPS/server-get-resp.json55
-rw-r--r--doc/api_samples/OS-EXT-IPS/server-get-resp.xml19
-rw-r--r--doc/api_samples/OS-EXT-IPS/server-post-req.json16
-rw-r--r--doc/api_samples/OS-EXT-IPS/server-post-req.xml19
-rw-r--r--doc/api_samples/OS-EXT-IPS/server-post-resp.json16
-rw-r--r--doc/api_samples/OS-EXT-IPS/server-post-resp.xml6
-rw-r--r--doc/api_samples/OS-EXT-IPS/servers-detail-resp.json57
-rw-r--r--doc/api_samples/OS-EXT-IPS/servers-detail-resp.xml21
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json (renamed from doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json)0
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml (renamed from doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml)0
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json (renamed from doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json)0
-rw-r--r--doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml (renamed from doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml)0
-rw-r--r--doc/api_samples/OS-EXT-STS/server-get-resp.json57
-rw-r--r--doc/api_samples/OS-EXT-STS/server-get-resp.xml19
-rw-r--r--doc/api_samples/OS-EXT-STS/servers-list-resp.json18
-rw-r--r--doc/api_samples/OS-EXT-STS/servers-list-resp.xml7
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json40
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml27
-rw-r--r--doc/api_samples/all_extensions/server-get-resp.json20
-rw-r--r--doc/api_samples/all_extensions/server-get-resp.xml10
-rw-r--r--doc/api_samples/all_extensions/servers-details-resp.json20
-rw-r--r--doc/api_samples/all_extensions/servers-details-resp.xml12
-rw-r--r--doc/api_samples/os-config-drive/server-config-drive-get-resp.json55
-rw-r--r--doc/api_samples/os-config-drive/server-config-drive-get-resp.xml19
-rw-r--r--doc/api_samples/os-config-drive/server-post-req.json16
-rw-r--r--doc/api_samples/os-config-drive/server-post-req.xml19
-rw-r--r--doc/api_samples/os-config-drive/server-post-resp.json16
-rw-r--r--doc/api_samples/os-config-drive/server-post-resp.xml6
-rw-r--r--doc/api_samples/os-config-drive/servers-config-drive-details-resp.json57
-rw-r--r--doc/api_samples/os-config-drive/servers-config-drive-details-resp.xml21
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json7
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml5
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json9
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml2
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json8
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml6
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json8
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml2
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json9
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml2
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json11
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml4
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json10
-rw-r--r--doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml4
-rw-r--r--doc/api_samples/os-instance-actions/instance-action-get-resp.json27
-rw-r--r--doc/api_samples/os-instance-actions/instance-action-get-resp.xml5
-rw-r--r--doc/api_samples/os-instance-actions/instance-actions-list-resp.json22
-rw-r--r--doc/api_samples/os-instance-actions/instance-actions-list-resp.xml5
-rw-r--r--doc/api_samples/os-services/service-disable-put-req.json4
-rw-r--r--doc/api_samples/os-services/service-disable-put-resp.json5
-rw-r--r--doc/api_samples/os-services/service-enable-put-req.json4
-rw-r--r--doc/api_samples/os-services/service-enable-put-resp.json5
-rw-r--r--doc/api_samples/os-services/services-list-get-resp.json36
-rw-r--r--doc/source/_ga/layout.html6
-rw-r--r--doc/source/conf.py7
-rw-r--r--doc/source/devref/filter_scheduler.rst7
-rw-r--r--etc/nova/policy.json3
-rw-r--r--etc/nova/rootwrap.d/compute.filters5
-rw-r--r--nova/api/ec2/ec2utils.py4
-rw-r--r--nova/api/metadata/base.py56
-rw-r--r--nova/api/metadata/handler.py7
-rw-r--r--nova/api/openstack/compute/contrib/admin_actions.py2
-rw-r--r--nova/api/openstack/compute/contrib/console_output.py2
-rw-r--r--nova/api/openstack/compute/contrib/extended_availability_zone.py27
-rw-r--r--nova/api/openstack/compute/contrib/extended_ips.py111
-rw-r--r--nova/api/openstack/compute/contrib/instance_actions.py128
-rw-r--r--nova/api/openstack/compute/contrib/quotas.py11
-rw-r--r--nova/api/openstack/compute/contrib/security_groups.py7
-rw-r--r--nova/api/openstack/compute/contrib/server_start_stop.py10
-rw-r--r--nova/api/openstack/compute/servers.py19
-rw-r--r--nova/availability_zones.py10
-rw-r--r--nova/compute/api.py37
-rw-r--r--nova/compute/cells_api.py2
-rw-r--r--nova/compute/instance_types.py3
-rwxr-xr-xnova/compute/manager.py52
-rw-r--r--nova/conductor/api.py56
-rw-r--r--nova/conductor/manager.py60
-rw-r--r--nova/conductor/rpcapi.py45
-rw-r--r--nova/context.py7
-rw-r--r--nova/crypto.py80
-rw-r--r--nova/db/api.py29
-rw-r--r--nova/db/sqlalchemy/api.py116
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/154_add_shadow_tables.py77
-rw-r--r--nova/db/sqlalchemy/types.py18
-rw-r--r--nova/db/sqlalchemy/utils.py117
-rw-r--r--nova/exception.py22
-rw-r--r--nova/locale/nova.pot1089
-rw-r--r--nova/network/linux_net.py61
-rw-r--r--nova/network/minidns.py1
-rw-r--r--nova/network/quantumv2/api.py30
-rw-r--r--nova/openstack/__init__.py15
-rw-r--r--nova/openstack/common/__init__.py15
-rw-r--r--nova/openstack/common/cfg.py30
-rw-r--r--nova/openstack/common/cliutils.py13
-rw-r--r--nova/openstack/common/excutils.py6
-rw-r--r--nova/openstack/common/jsonutils.py13
-rw-r--r--nova/openstack/common/plugin/pluginmanager.py1
-rw-r--r--nova/openstack/common/policy.py20
-rw-r--r--nova/openstack/common/rpc/impl_zmq.py5
-rw-r--r--nova/openstack/common/rpc/matchmaker.py17
-rw-r--r--nova/openstack/common/setup.py213
-rw-r--r--nova/openstack/common/timeutils.py18
-rw-r--r--nova/openstack/common/version.py94
-rw-r--r--nova/policy.py7
-rw-r--r--nova/scheduler/driver.py20
-rw-r--r--nova/scheduler/filter_scheduler.py59
-rw-r--r--nova/scheduler/filters/affinity_filter.py20
-rw-r--r--nova/scheduler/host_manager.py10
-rw-r--r--nova/scheduler/manager.py11
-rw-r--r--nova/servicegroup/api.py5
-rw-r--r--nova/servicegroup/drivers/zk.py157
-rw-r--r--nova/testing/__init__.py0
-rw-r--r--nova/tests/README.rst (renamed from nova/testing/README.rst)10
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions.py57
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_console_output.py16
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py30
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_ips.py168
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_instance_actions.py231
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quotas.py39
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_security_groups.py38
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_start_stop.py21
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py2
-rw-r--r--nova/tests/baremetal/test_virtual_power_driver.py85
-rw-r--r--nova/tests/compute/test_compute.py59
-rw-r--r--nova/tests/conductor/test_conductor.py83
-rw-r--r--nova/tests/fake_instance_actions.py58
-rw-r--r--nova/tests/fake_network.py4
-rw-r--r--nova/tests/fake_policy.py3
-rw-r--r--nova/tests/fake_volume.py11
-rw-r--r--nova/tests/hyperv/fake.py52
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl3
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl55
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl56
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl21
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl)0
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl)0
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl)0
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl)0
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl57
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl7
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl55
-rw-r--r--nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-config-drive/server-post-req.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-config-drive/server-post-req.xml.tpl19
-rw-r--r--nova/tests/integrated/api_samples/os-config-drive/server-post-resp.json.tpl16
-rw-r--r--nova/tests/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl57
-rw-r--r--nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl21
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl7
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl6
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl8
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl9
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl2
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl11
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl10
-rw-r--r--nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl27
-rw-r--r--nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl22
-rw-r--r--nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-services/service-disable-put-req.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-services/service-disable-put-resp.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-services/service-enable-put-req.json.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-services/service-enable-put-resp.json.tpl5
-rw-r--r--nova/tests/integrated/api_samples/os-services/services-list-get-resp.json.tpl36
-rw-r--r--nova/tests/integrated/test_api_samples.py296
-rw-r--r--nova/tests/network/test_linux_net.py67
-rw-r--r--nova/tests/network/test_quantumv2.py12
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py176
-rw-r--r--nova/tests/scheduler/test_host_filters.py14
-rw-r--r--nova/tests/scheduler/test_host_manager.py63
-rw-r--r--nova/tests/scheduler/test_scheduler.py56
-rw-r--r--nova/tests/servicegroup/test_zk_driver.py65
-rw-r--r--nova/tests/test_api.py7
-rw-r--r--nova/tests/test_crypto.py28
-rw-r--r--nova/tests/test_db_api.py167
-rw-r--r--nova/tests/test_hypervapi.py290
-rw-r--r--nova/tests/test_imagebackend.py8
-rw-r--r--nova/tests/test_instance_types.py11
-rw-r--r--nova/tests/test_iptables_network.py118
-rw-r--r--nova/tests/test_libvirt_blockinfo.py2
-rw-r--r--nova/tests/test_libvirt_vif.py44
-rw-r--r--nova/tests/test_libvirt_volume.py56
-rw-r--r--nova/tests/test_metadata.py14
-rw-r--r--nova/tests/test_migration_utils.py126
-rw-r--r--nova/tests/test_migrations.py122
-rw-r--r--nova/tests/test_utils.py39
-rw-r--r--nova/tests/test_versions.py36
-rw-r--r--nova/tests/test_vmwareapi.py20
-rw-r--r--nova/tests/test_vmwareapi_vmops.py62
-rw-r--r--nova/tests/test_xenapi.py7
-rw-r--r--nova/tests/utils.py85
-rw-r--r--nova/tests/virt/xenapi/test_volumeops.py32
-rw-r--r--nova/utils.py31
-rw-r--r--nova/version.py22
-rw-r--r--nova/virt/baremetal/pxe.py3
-rw-r--r--nova/virt/baremetal/virtual_power_driver.py10
-rw-r--r--nova/virt/hyperv/basevolumeutils.py7
-rwxr-xr-xnova/virt/hyperv/driver.py31
-rw-r--r--nova/virt/hyperv/hostops.py15
-rw-r--r--nova/virt/hyperv/hostutils.py7
-rw-r--r--nova/virt/hyperv/imagecache.py60
-rw-r--r--nova/virt/hyperv/livemigrationops.py19
-rw-r--r--nova/virt/hyperv/livemigrationutils.py2
-rw-r--r--nova/virt/hyperv/migrationops.py233
-rw-r--r--nova/virt/hyperv/pathutils.py113
-rw-r--r--nova/virt/hyperv/snapshotops.py20
-rw-r--r--nova/virt/hyperv/vhdutils.py29
-rw-r--r--nova/virt/hyperv/vmops.py188
-rw-r--r--nova/virt/hyperv/vmutils.py39
-rw-r--r--nova/virt/hyperv/volumeops.py74
-rwxr-xr-x[-rw-r--r--]nova/virt/images.py4
-rw-r--r--nova/virt/libvirt/blockinfo.py8
-rwxr-xr-xnova/virt/libvirt/driver.py20
-rwxr-xr-xnova/virt/libvirt/imagebackend.py27
-rw-r--r--nova/virt/libvirt/vif.py137
-rw-r--r--nova/virt/libvirt/volume.py128
-rw-r--r--nova/virt/powervm/blockdev.py43
-rw-r--r--nova/virt/powervm/command.py3
-rw-r--r--nova/virt/powervm/common.py68
-rwxr-xr-xnova/virt/powervm/driver.py102
-rw-r--r--nova/virt/powervm/lpar.py6
-rw-r--r--nova/virt/powervm/operator.py233
-rwxr-xr-xnova/virt/vmwareapi/driver.py2
-rw-r--r--nova/virt/vmwareapi/vif.py6
-rw-r--r--nova/virt/vmwareapi/vm_util.py19
-rw-r--r--nova/virt/vmwareapi/vmops.py69
-rwxr-xr-xnova/virt/xenapi/driver.py4
-rw-r--r--nova/virt/xenapi/vm_utils.py8
-rw-r--r--nova/virt/xenapi/vmops.py1
-rw-r--r--nova/virt/xenapi/volume_utils.py188
-rw-r--r--nova/virt/xenapi/volumeops.py87
-rw-r--r--nova/volume/cinder.py10
-rw-r--r--openstack-common.conf2
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/kernel2
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/migration4
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost2
-rwxr-xr-xrun_tests.sh47
-rw-r--r--setup.py7
-rwxr-xr-xtools/hacking.py196
-rw-r--r--tools/pip-requires1
-rwxr-xr-xtools/regression_tester.py48
-rwxr-xr-xtools/run_pep8.sh25
-rwxr-xr-xtools/unused_imports.sh4
-rw-r--r--tox.ini8
280 files changed, 8113 insertions, 2087 deletions
diff --git a/.coveragerc b/.coveragerc
index 902a94349..c89967dfe 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,7 +1,7 @@
[run]
branch = True
source = nova
-omit = nova/tests/*,DynamicallyCompiledCheetahTemplate.py
+omit = nova/tests/*,nova/openstack/*,DynamicallyCompiledCheetahTemplate.py
[report]
ignore-errors = True
diff --git a/HACKING.rst b/HACKING.rst
index 213495832..30f87576f 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -52,6 +52,7 @@ Imports
-------
- Do not import objects, only modules (*)
- Do not import more than one module per line (*)
+- Do not use wildcard ``*`` import (*)
- Do not make relative imports
- Do not make new nova.db imports in nova/virt/*
- Order your imports by the full module path
@@ -62,6 +63,8 @@ Imports
- imports from ``migrate`` package
- imports from ``sqlalchemy`` package
- imports from ``nova.db.sqlalchemy.session`` module
+- imports from ``nova.openstack.common.log.logging`` package
+- imports from ``nova.db.sqlalchemy.migration.versioning_api`` package
Example::
@@ -218,7 +221,27 @@ submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.
For more information on creating unit tests and utilizing the testing
-infrastructure in OpenStack Nova, please read nova/testing/README.rst.
+infrastructure in OpenStack Nova, please read nova/tests/README.rst.
+
+
+Running Tests
+-------------
+The testing system is based on a combination of tox and testr. The canonical
+approach to running tests is to simply run the command `tox`. This will
+create virtual environments, populate them with depenedencies and run all of
+the tests that OpenStack CI systems run. Behind the scenes, tox is running
+`testr run --parallel`, but is set up such that you can supply any additional
+testr arguments that are needed to tox. For example, you can run:
+`tox -- --analyze-isolation` to cause tox to tell testr to add
+--analyze-isolation to its argument list.
+
+It is also possible to run the tests inside of a virtual environment
+you have created, or it is possible that you have all of the dependencies
+installed locally already. In this case, you can interact with the testr
+command directly. Running `testr run` will run the entire test suite. `testr
+run --parallel` will run it in parallel (this is the default incantation tox
+uses.) More information about testr can be found at:
+http://wiki.openstack.org/testr
openstack-common
diff --git a/bin/nova-baremetal-manage b/bin/nova-baremetal-manage
index 6c27a7b1a..35fb83f72 100755
--- a/bin/nova-baremetal-manage
+++ b/bin/nova-baremetal-manage
@@ -54,12 +54,7 @@
CLI interface for nova bare-metal management.
"""
-import ast
-import errno
import gettext
-import math
-import netaddr
-import optparse
import os
import sys
@@ -75,15 +70,9 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import config
-from nova import context
-from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import cliutils
-from nova.openstack.common import importutils
from nova.openstack.common import log as logging
-from nova.openstack.common import rpc
-from nova.openstack.common import timeutils
-from nova import utils
from nova import version
from nova.virt.baremetal import db as bmdb
from nova.virt.baremetal.db import migration as bmdb_migration
diff --git a/bin/nova-clear-rabbit-queues b/bin/nova-clear-rabbit-queues
index 618aa4587..e31758769 100755
--- a/bin/nova-clear-rabbit-queues
+++ b/bin/nova-clear-rabbit-queues
@@ -23,11 +23,9 @@
"""
-import datetime
import gettext
import os
import sys
-import time
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -41,8 +39,6 @@ gettext.install('nova', unicode=1)
from nova import config
-from nova import context
-from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
diff --git a/bin/nova-compute b/bin/nova-compute
index 814147d66..d99a21051 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -64,7 +64,7 @@ def block_db_access():
def __call__(self, *args, **kwargs):
stacktrace = "".join(traceback.format_stack())
LOG.error('No db access allowed in nova-compute: %s' % stacktrace)
- raise exception.DBError('No db access allowed in nova-compute')
+ raise exception.DBNotAllowed('nova-compute')
nova.db.api.IMPL = NoDB()
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 0438ee6ff..33df97b55 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -38,13 +38,11 @@ gettext.install('nova', unicode=1)
from nova import config
from nova import context
from nova import db
-from nova.network import linux_net
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
-from nova import utils
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
diff --git a/bin/nova-manage b/bin/nova-manage
index c793fed16..22549a50d 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -72,7 +72,6 @@ gettext.install('nova', unicode=1)
from nova.api.ec2 import ec2utils
from nova import availability_zones
from nova.compute import instance_types
-from nova.compute import rpcapi as compute_rpcapi
from nova import config
from nova import context
from nova import db
@@ -87,6 +86,7 @@ from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova import servicegroup
from nova import utils
from nova import version
@@ -624,6 +624,7 @@ class ServiceCommands(object):
"""
Show a list of all running services. Filter by host & service name.
"""
+ servicegroup_api = servicegroup.API()
ctxt = context.get_admin_context()
now = timeutils.utcnow()
services = db.service_get_all(ctxt)
@@ -641,8 +642,7 @@ class ServiceCommands(object):
_('State'),
_('Updated_At'))
for svc in services:
- delta = now - (svc['updated_at'] or svc['created_at'])
- alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
+ alive = servicegroup_api.service_is_up(svc)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if svc['disabled']:
@@ -770,6 +770,17 @@ class DbCommands(object):
"""Print the current database version."""
print migration.db_version()
+ @args('--max_rows', dest='max_rows', metavar='<number>',
+ help='Maximum number of deleted rows to archive')
+ def archive_deleted_rows(self, max_rows=None):
+ """Move up to max_rows deleted rows from production tables to shadow
+ tables.
+ """
+ if max_rows is not None:
+ max_rows = int(max_rows)
+ admin_context = context.get_admin_context()
+ db.archive_deleted_rows(admin_context, max_rows)
+
class InstanceTypeCommands(object):
"""Class for managing instance types / flavors."""
diff --git a/bin/nova-rootwrap b/bin/nova-rootwrap
index 72a8c6309..35e2f47f3 100755
--- a/bin/nova-rootwrap
+++ b/bin/nova-rootwrap
@@ -20,14 +20,17 @@
Filters which commands a service is allowed to run as another user.
- To use this with nova, you should set the following in nova.conf:
+ To use this with nova, you should set the following in
+ nova.conf:
rootwrap_config=/etc/nova/rootwrap.conf
- You also need to let the nova user run nova-rootwrap as root in sudoers:
- nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
+ You also need to let the nova user run nova-rootwrap
+ as root in sudoers:
+ nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap
+ /etc/nova/rootwrap.conf *
- Service packaging should deploy .filters files only on nodes where they are
- needed, to avoid allowing more than is necessary.
+ Service packaging should deploy .filters files only on nodes where
+ they are needed, to avoid allowing more than is necessary.
"""
import ConfigParser
@@ -102,8 +105,8 @@ if __name__ == '__main__':
exec_dirs=config.exec_dirs)
if config.use_syslog:
logging.info("(%s > %s) Executing %s (filter match = %s)" % (
- os.getlogin(), pwd.getpwuid(os.getuid())[0],
- command, filtermatch.name))
+ os.getlogin(), pwd.getpwuid(os.getuid())[0],
+ command, filtermatch.name))
obj = subprocess.Popen(command,
stdin=sys.stdin,
diff --git a/bin/nova-xvpvncproxy b/bin/nova-xvpvncproxy
index 7882645ad..c24f41176 100755
--- a/bin/nova-xvpvncproxy
+++ b/bin/nova-xvpvncproxy
@@ -33,7 +33,6 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
from nova import config
from nova.openstack.common import log as logging
-from nova.openstack.common import rpc
from nova import service
from nova.vnc import xvp_proxy
diff --git a/doc/api_samples/OS-EXT-AZ/server-get-resp.json b/doc/api_samples/OS-EXT-AZ/server-get-resp.json
index a7cf031ef..c2f0fb2ba 100644
--- a/doc/api_samples/OS-EXT-AZ/server-get-resp.json
+++ b/doc/api_samples/OS-EXT-AZ/server-get-resp.json
@@ -1,7 +1,6 @@
{
"server": {
- "OS-EXT-AZ:availability_zone": null,
- "OS-EXT-AZ:host_availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "nova",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {
@@ -53,4 +52,4 @@
"updated": "2013-01-30T13:38:49Z",
"user_id": "fake"
}
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/OS-EXT-AZ/server-get-resp.xml b/doc/api_samples/OS-EXT-AZ/server-get-resp.xml
index 7a6edf057..5025af4e8 100644
--- a/doc/api_samples/OS-EXT-AZ/server-get-resp.xml
+++ b/doc/api_samples/OS-EXT-AZ/server-get-resp.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-01-30T14:29:20Z" hostId="471e52951e3182954c5a93489dafc3fc38a9ef3e0b62d26dc740460c" name="new-server-test" created="2013-01-30T14:29:19Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="26ea8424-758d-483a-addc-9a5905afc9e6" OS-EXT-AZ:host_availability_zone="nova" OS-EXT-AZ:availability_zone="None">
+<server xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-01-30T14:29:20Z" hostId="471e52951e3182954c5a93489dafc3fc38a9ef3e0b62d26dc740460c" name="new-server-test" created="2013-01-30T14:29:19Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="26ea8424-758d-483a-addc-9a5905afc9e6" OS-EXT-AZ:availability_zone="nova">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -16,4 +16,4 @@
</addresses>
<atom:link href="http://openstack.example.com/v2/openstack/servers/26ea8424-758d-483a-addc-9a5905afc9e6" rel="self"/>
<atom:link href="http://openstack.example.com/openstack/servers/26ea8424-758d-483a-addc-9a5905afc9e6" rel="bookmark"/>
-</server> \ No newline at end of file
+</server>
diff --git a/doc/api_samples/OS-EXT-AZ/servers-detail-resp.json b/doc/api_samples/OS-EXT-AZ/servers-detail-resp.json
index 5fab02ac0..2ecea959a 100644
--- a/doc/api_samples/OS-EXT-AZ/servers-detail-resp.json
+++ b/doc/api_samples/OS-EXT-AZ/servers-detail-resp.json
@@ -1,8 +1,7 @@
{
"servers": [
{
- "OS-EXT-AZ:availability_zone": null,
- "OS-EXT-AZ:host_availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "nova",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {
@@ -55,4 +54,4 @@
"user_id": "fake"
}
]
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml b/doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml
index 4f1f311a8..4cdf79334 100644
--- a/doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml
+++ b/doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
<servers xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server status="ACTIVE" updated="2013-01-30T14:29:20Z" hostId="85adf7d0492dedf0a7e3dc44ef7d16186b768ca3df33c4d608e630d9" name="new-server-test" created="2013-01-30T14:29:19Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="a668c72d-2bac-4806-a297-c7c11d97e3b3" OS-EXT-AZ:host_availability_zone="nova" OS-EXT-AZ:availability_zone="None">
+ <server status="ACTIVE" updated="2013-01-30T14:29:20Z" hostId="85adf7d0492dedf0a7e3dc44ef7d16186b768ca3df33c4d608e630d9" name="new-server-test" created="2013-01-30T14:29:19Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="a668c72d-2bac-4806-a297-c7c11d97e3b3" OS-EXT-AZ:availability_zone="nova">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -18,4 +18,4 @@
<atom:link href="http://openstack.example.com/v2/openstack/servers/a668c72d-2bac-4806-a297-c7c11d97e3b3" rel="self"/>
<atom:link href="http://openstack.example.com/openstack/servers/a668c72d-2bac-4806-a297-c7c11d97e3b3" rel="bookmark"/>
</server>
-</servers> \ No newline at end of file
+</servers>
diff --git a/doc/api_samples/OS-EXT-IPS/server-get-resp.json b/doc/api_samples/OS-EXT-IPS/server-get-resp.json
new file mode 100644
index 000000000..e985ad115
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IPS/server-get-resp.json
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-02-07T18:46:28Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "4e2003eddbfdb1280c2618d04090bcdd6773203b8da8347af0b2723d",
+ "id": "dc7281f9-ee47-40b9-9950-9f73e7961caa",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/dc7281f9-ee47-40b9-9950-9f73e7961caa",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/dc7281f9-ee47-40b9-9950-9f73e7961caa",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-02-07T18:46:29Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IPS/server-get-resp.xml b/doc/api_samples/OS-EXT-IPS/server-get-resp.xml
new file mode 100644
index 000000000..84619ffec
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IPS/server-get-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-02-07T18:46:29Z" hostId="068cc5e2de14b6e533a239c6eac0a0bdedcd57cab25450a6d3da43af" name="new-server-test" created="2013-02-07T18:46:28Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="22e7cf4d-ab7a-4a3d-9599-7d0dbaf9ed55">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/22e7cf4d-ab7a-4a3d-9599-7d0dbaf9ed55" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/22e7cf4d-ab7a-4a3d-9599-7d0dbaf9ed55" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IPS/server-post-req.json b/doc/api_samples/OS-EXT-IPS/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IPS/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IPS/server-post-req.xml b/doc/api_samples/OS-EXT-IPS/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IPS/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IPS/server-post-resp.json b/doc/api_samples/OS-EXT-IPS/server-post-resp.json
new file mode 100644
index 000000000..d641e74aa
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IPS/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "zD7wDKTXiHsp",
+ "id": "b44e5008-42f7-4048-b4c8-f40a29da88ba",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/b44e5008-42f7-4048-b4c8-f40a29da88ba",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/b44e5008-42f7-4048-b4c8-f40a29da88ba",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IPS/server-post-resp.xml b/doc/api_samples/OS-EXT-IPS/server-post-resp.xml
new file mode 100644
index 000000000..b268ba0d3
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IPS/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="752dd57d-933b-4a57-a0ae-4c3431c5abc7" adminPass="B2gvFFjBQCVQ">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/752dd57d-933b-4a57-a0ae-4c3431c5abc7" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/752dd57d-933b-4a57-a0ae-4c3431c5abc7" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IPS/servers-detail-resp.json b/doc/api_samples/OS-EXT-IPS/servers-detail-resp.json
new file mode 100644
index 000000000..cb91d6937
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IPS/servers-detail-resp.json
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-02-07T18:40:59Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "fe866a4962fe3bdb6c2db9c8f7dcdb9555aca73387e72b5cb9c45bd3",
+ "id": "76908712-653a-4d16-807e-d89d41435d24",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/76908712-653a-4d16-807e-d89d41435d24",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/76908712-653a-4d16-807e-d89d41435d24",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-02-07T18:40:59Z",
+ "user_id": "fake"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-IPS/servers-detail-resp.xml b/doc/api_samples/OS-EXT-IPS/servers-detail-resp.xml
new file mode 100644
index 000000000..93a3ff2e9
--- /dev/null
+++ b/doc/api_samples/OS-EXT-IPS/servers-detail-resp.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="2013-02-07T18:40:59Z" hostId="51a80e6ee89b638b2cb57eb4e39d89a725e07c8a698f4d8e256f8665" name="new-server-test" created="2013-02-07T18:40:59Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="0337de6b-1d43-46c8-8804-35669f1dea9a">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/0337de6b-1d43-46c8-8804-35669f1dea9a" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/0337de6b-1d43-46c8-8804-35669f1dea9a" rel="bookmark"/>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json b/doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json
index ad3bcab5d..ad3bcab5d 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml b/doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml
index 4b42c3586..4b42c3586 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json b/doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json
index db3de77f4..db3de77f4 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml b/doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml
index 8179a3bf9..8179a3bf9 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml
diff --git a/doc/api_samples/OS-EXT-STS/server-get-resp.json b/doc/api_samples/OS-EXT-STS/server-get-resp.json
new file mode 100644
index 000000000..763426bb1
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/server-get-resp.json
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-02-07T19:35:09Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "570eff4776ab310707d11d181037337197086998a8b3305c90bf87c8",
+ "id": "ecb5e433-fa75-4db2-af3d-a29ae8618edc",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/ecb5e433-fa75-4db2-af3d-a29ae8618edc",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/ecb5e433-fa75-4db2-af3d-a29ae8618edc",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-02-07T19:35:10Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/server-get-resp.xml b/doc/api_samples/OS-EXT-STS/server-get-resp.xml
new file mode 100644
index 000000000..53363a224
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/server-get-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-02-07T19:35:10Z" hostId="372afb648339fb6f22faa0b75fdd8834e2382fe02b352af8d7ee0b84" name="new-server-test" created="2013-02-07T19:35:09Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="68647408-85a7-4d9b-85e7-7f1e238983ad" OS-EXT-STS:vm_state="active" OS-EXT-STS:task_state="None" OS-EXT-STS:power_state="1">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/68647408-85a7-4d9b-85e7-7f1e238983ad" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/68647408-85a7-4d9b-85e7-7f1e238983ad" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/servers-list-resp.json b/doc/api_samples/OS-EXT-STS/servers-list-resp.json
deleted file mode 100644
index d0309cc1f..000000000
--- a/doc/api_samples/OS-EXT-STS/servers-list-resp.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "servers": [
- {
- "id": "a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
- "links": [
- {
- "href": "http://openstack.example.com/v2/openstack/servers/a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
- "rel": "self"
- },
- {
- "href": "http://openstack.example.com/openstack/servers/a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
- "rel": "bookmark"
- }
- ],
- "name": "new-server-test"
- }
- ]
-} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/servers-list-resp.xml b/doc/api_samples/OS-EXT-STS/servers-list-resp.xml
deleted file mode 100644
index f1c3cabf4..000000000
--- a/doc/api_samples/OS-EXT-STS/servers-list-resp.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server name="new-server-test" id="7128d3b9-1993-402c-91ca-ed59a0193ffa">
- <atom:link href="http://openstack.example.com/v2/openstack/servers/7128d3b9-1993-402c-91ca-ed59a0193ffa" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/7128d3b9-1993-402c-91ca-ed59a0193ffa" rel="bookmark"/>
- </server>
-</servers> \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 439d1af18..ba5e410eb 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -25,6 +25,14 @@
"updated": "2013-01-30T00:00:00+00:00"
},
{
+ "alias": "OS-EXT-IPS",
+ "description": "Adds type parameter to the ip list.",
+ "links": [],
+ "name": "ExtendedIps",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_ips/api/v1.1",
+ "updated": "2013-01-06T00:00:00+00:00"
+ },
+ {
"alias": "OS-EXT-SRV-ATTR",
"description": "Extended Server Attributes support.",
"links": [],
@@ -90,11 +98,11 @@
},
{
"alias": "os-availability-zone",
- "description": "Add availability_zone to the Create Server v1.1 API.",
+ "description": "1. Add availability_zone to the Create Server v1.1 API.\n 2. Add availability zones describing.\n ",
"links": [],
"name": "AvailabilityZone",
"namespace": "http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1",
- "updated": "2012-08-09T00:00:00+00:00"
+ "updated": "2012-12-21T00:00:00+00:00"
},
{
"alias": "os-baremetal-nodes",
@@ -202,7 +210,7 @@
},
{
"alias": "os-flavor-access",
- "description": "Flavor access supprt.",
+ "description": "Flavor access support.",
"links": [],
"name": "FlavorAccess",
"namespace": "http://docs.openstack.org/compute/ext/flavor_access/api/v2",
@@ -305,6 +313,14 @@
"updated": "2012-06-21T00:00:00+00:00"
},
{
+ "alias": "os-instance-actions",
+ "description": "View a log of actions taken on an instance",
+ "links": [],
+ "name": "InstanceActions",
+ "namespace": "http://docs.openstack.org/compute/ext/instance-actions/api/v1.1",
+ "updated": "2013-02-08T00:00:00+00:00"
+ },
+ {
"alias": "os-instance_usage_audit_log",
"description": "Admin-only Task Log Monitoring.",
"links": [],
@@ -337,14 +353,6 @@
"updated": "2011-12-23T00:00:00+00:00"
},
{
- "alias": "os-tenant-networks",
- "description": "Tenant-based Network Management Extension.",
- "links": [],
- "name": "OSTenantNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
- "updated": "2011-12-23T00:00:00+00:00"
- },
- {
"alias": "os-networks-associate",
"description": "Network association support.",
"links": [],
@@ -394,7 +402,7 @@
},
{
"alias": "os-server-password",
- "description": "Server password support",
+ "description": "Server password support.",
"links": [],
"name": "ServerPassword",
"namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2",
@@ -425,6 +433,14 @@
"updated": "2011-08-19T00:00:00+00:00"
},
{
+ "alias": "os-tenant-networks",
+ "description": "Tenant-based Network Management Extension.",
+ "links": [],
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
+ "updated": "2012-03-07T09:46:43-05:00"
+ },
+ {
"alias": "os-used-limits",
"description": "Provide data on limited resources that are being used.",
"links": [],
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index 71f79dc3a..a18e52437 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -9,6 +9,9 @@
<extension alias="OS-EXT-AZ" updated="2013-01-30T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" name="ExtendedAvailabilityZone">
<description>Extended Server Attributes support.</description>
</extension>
+ <extension alias="OS-EXT-IPS" updated="2013-01-06T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" name="ExtendedIps">
+ <description>Adds type parameter to the ip list.</description>
+ </extension>
<extension alias="OS-EXT-SRV-ATTR" updated="2011-11-03T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedServerAttributes">
<description>Extended Server Attributes support.</description>
</extension>
@@ -37,16 +40,17 @@
<extension alias="os-aggregates" updated="2012-01-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/aggregates/api/v1.1" name="Aggregates">
<description>Admin-only aggregate administration.</description>
</extension>
- <extension alias="os-availability-zone" updated="2012-08-09T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone">
- <description>Add availability_zone to the Create Server v1.1 API.</description>
+ <extension alias="os-availability-zone" updated="2012-12-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1" name="AvailabilityZone">
+ <description>1. Add availability_zone to the Create Server v1.1 API.
+ 2. Add availability zones describing.
+ </description>
</extension>
<extension alias="os-baremetal-nodes" updated="2013-01-04T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2" name="BareMetalNodes">
<description>Admin-only bare-metal node administration.</description>
</extension>
<extension alias="os-cells" updated="2011-09-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/cells/api/v1.1" name="Cells">
- <description>Enables cells-related functionality such as adding child cells,
- listing child cells, getting the capabilities of the local cell,
- and returning build plans to parent cells' schedulers
+ <description>Enables cells-related functionality such as adding neighbor cells,
+ listing neighbor cells, and getting the capabilities of the local cell.
</description>
</extension>
<extension alias="os-certificates" updated="2012-01-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/certificates/api/v1.1" name="Certificates">
@@ -91,7 +95,7 @@
<description>Fixed IPs support.</description>
</extension>
<extension alias="os-flavor-access" updated="2012-08-01T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_access/api/v2" name="FlavorAccess">
- <description>Flavor access supprt.</description>
+ <description>Flavor access support.</description>
</extension>
<extension alias="os-flavor-extra-specs" updated="2011-06-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/flavor_extra_specs/api/v1.1" name="FlavorExtraSpecs">
<description>Instance type (flavor) extra specs.</description>
@@ -131,6 +135,9 @@
<extension alias="os-hypervisors" updated="2012-06-21T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/hypervisors/api/v1.1" name="Hypervisors">
<description>Admin-only hypervisor administration.</description>
</extension>
+ <extension alias="os-instance-actions" updated="2013-02-08T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/instance-actions/api/v1.1" name="InstanceActions">
+ <description>View a log of actions taken on an instance</description>
+ </extension>
<extension alias="os-instance_usage_audit_log" updated="2012-07-06T01:00:00+00:00" namespace="http://docs.openstack.org/ext/services/api/v1.1" name="OSInstanceUsageAuditLog">
<description>Admin-only Task Log Monitoring.</description>
</extension>
@@ -146,9 +153,6 @@
<extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
<description>Network association support.</description>
</extension>
- <extension alias="os-tenant-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
- <description>Tenant-based Network Management Extension.</description>
- </extension>
<extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
<description>Quota classes management support.</description>
</extension>
@@ -165,7 +169,7 @@
<description>Allow Admins to view server diagnostics through server action.</description>
</extension>
<extension alias="os-server-password" updated="2012-11-29T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/server-password/api/v2" name="ServerPassword">
- <description>Server password support</description>
+ <description>Server password support.</description>
</extension>
<extension alias="os-server-start-stop" updated="2012-01-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/servers/api/v1.1" name="ServerStartStop">
<description>Start/Stop instance compute API support.</description>
@@ -176,6 +180,9 @@
<extension alias="os-simple-tenant-usage" updated="2011-08-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1" name="SimpleTenantUsage">
<description>Simple tenant usage extension.</description>
</extension>
+ <extension alias="os-tenant-networks" updated="2012-03-07T09:46:43-05:00" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
+ <description>Tenant-based Network Management Extension.</description>
+ </extension>
<extension alias="os-used-limits" updated="2012-07-13T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/used_limits/api/v1.1" name="UsedLimits">
<description>Provide data on limited resources that are being used.</description>
</extension>
diff --git a/doc/api_samples/all_extensions/server-get-resp.json b/doc/api_samples/all_extensions/server-get-resp.json
index 56feee209..63f955ed6 100644
--- a/doc/api_samples/all_extensions/server-get-resp.json
+++ b/doc/api_samples/all_extensions/server-get-resp.json
@@ -1,9 +1,8 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": null,
- "OS-EXT-AZ:host_availability_zone": "nova",
- "OS-EXT-SRV-ATTR:host": "9373c31dbfe6422d9a9997c5f42a8789",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "b00875071c774b5487d217b82f03dfa2",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
@@ -14,13 +13,14 @@
"addresses": {
"private": [
{
+ "OS-EXT-IPS:type": "fixed",
"addr": "192.168.0.3",
"version": 4
}
]
},
"config_drive": "",
- "created": "2013-01-30T14:03:57Z",
+ "created": "2013-02-07T18:58:56Z",
"flavor": {
"id": "1",
"links": [
@@ -30,8 +30,8 @@
}
]
},
- "hostId": "fc3a98f0b240ff341eb60ce8c0ae7412970d218381c5a827cfd398ee",
- "id": "61608b15-33ea-412f-b9eb-78c6a347ec7b",
+ "hostId": "ecbf72ad08d0d4f26768f526d17c2813812b4bc1598f081e16eb9b8b",
+ "id": "12c05fea-29ec-4f77-9025-b3d72584ef1d",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -44,11 +44,11 @@
"key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/61608b15-33ea-412f-b9eb-78c6a347ec7b",
+ "href": "http://openstack.example.com/v2/openstack/servers/12c05fea-29ec-4f77-9025-b3d72584ef1d",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/61608b15-33ea-412f-b9eb-78c6a347ec7b",
+ "href": "http://openstack.example.com/openstack/servers/12c05fea-29ec-4f77-9025-b3d72584ef1d",
"rel": "bookmark"
}
],
@@ -64,7 +64,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2013-01-30T14:04:01Z",
+ "updated": "2013-02-07T18:58:57Z",
"user_id": "fake"
}
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/all_extensions/server-get-resp.xml b/doc/api_samples/all_extensions/server-get-resp.xml
index 45e873147..e2ded16e5 100644
--- a/doc/api_samples/all_extensions/server-get-resp.xml
+++ b/doc/api_samples/all_extensions/server-get-resp.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-01-30T14:35:43Z" hostId="1bd51b03d4cc4d191f24e50b7a439bc6f67154fb955c147f8d0fcd09" name="new-server-test" created="2013-01-30T14:35:42Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="072a5fff-8026-4e43-874d-3ed6068884e4" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="c86d3d0e86c94eac8e87791740ca11f1" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-EXT-AZ:host_availability_zone="nova" OS-EXT-AZ:availability_zone="None" OS-DCF:diskConfig="AUTO">
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-02-07T19:01:59Z" hostId="06d1cfd52be5f5d197193db2842978235fd085bd2dfaea32e5068468" name="new-server-test" created="2013-02-07T19:01:58Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="6be39927-53b2-4aee-8593-1c72b3673168" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="b98603db318e495e819601702d16c512" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-EXT-AZ:availability_zone="nova" OS-DCF:diskConfig="AUTO">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -11,12 +11,12 @@
</metadata>
<addresses>
<network id="private">
- <ip version="4" addr="192.168.0.3"/>
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/072a5fff-8026-4e43-874d-3ed6068884e4" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/072a5fff-8026-4e43-874d-3ed6068884e4" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/6be39927-53b2-4aee-8593-1c72b3673168" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/6be39927-53b2-4aee-8593-1c72b3673168" rel="bookmark"/>
<security_groups>
<security_group name="default"/>
</security_groups>
-</server> \ No newline at end of file
+</server>
diff --git a/doc/api_samples/all_extensions/servers-details-resp.json b/doc/api_samples/all_extensions/servers-details-resp.json
index ed5f4d204..cc945edb5 100644
--- a/doc/api_samples/all_extensions/servers-details-resp.json
+++ b/doc/api_samples/all_extensions/servers-details-resp.json
@@ -2,9 +2,8 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": null,
- "OS-EXT-AZ:host_availability_zone": "nova",
- "OS-EXT-SRV-ATTR:host": "94d2ccc30d73475ab987661158405463",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "33924d68ef4e4214bb9bc200178d23b8",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
@@ -15,13 +14,14 @@
"addresses": {
"private": [
{
+ "OS-EXT-IPS:type": "fixed",
"addr": "192.168.0.3",
"version": 4
}
]
},
"config_drive": "",
- "created": "2013-01-30T14:03:44Z",
+ "created": "2013-02-07T18:58:56Z",
"flavor": {
"id": "1",
"links": [
@@ -31,8 +31,8 @@
}
]
},
- "hostId": "f74e355ecde489405dfc0a1a48f2a85a5e2564e2ac6633d2b3e1b525",
- "id": "033cc72d-708b-473b-ae8e-41064ea3fa21",
+ "hostId": "e0028a678cb7760fe5987947ab495dbb0f79c1071850f87a9aa8227f",
+ "id": "3ec52036-bfee-4869-9c4c-81a579d72196",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -45,11 +45,11 @@
"key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/033cc72d-708b-473b-ae8e-41064ea3fa21",
+ "href": "http://openstack.example.com/v2/openstack/servers/3ec52036-bfee-4869-9c4c-81a579d72196",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/033cc72d-708b-473b-ae8e-41064ea3fa21",
+ "href": "http://openstack.example.com/openstack/servers/3ec52036-bfee-4869-9c4c-81a579d72196",
"rel": "bookmark"
}
],
@@ -65,8 +65,8 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2013-01-30T14:03:50Z",
+ "updated": "2013-02-07T18:58:57Z",
"user_id": "fake"
}
]
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/all_extensions/servers-details-resp.xml b/doc/api_samples/all_extensions/servers-details-resp.xml
index 47e452ba1..44b3b09c5 100644
--- a/doc/api_samples/all_extensions/servers-details-resp.xml
+++ b/doc/api_samples/all_extensions/servers-details-resp.xml
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server status="ACTIVE" updated="2013-01-30T14:36:58Z" hostId="46d42af8fc0d50d4334ef6077b595a85291d2f5682ba8e95c37e69c3" name="new-server-test" created="2013-01-30T14:36:56Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="03665c18-c1f1-4eb9-83a8-da00a2c1d552" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="d868eae67451474180a6193c24cb88c5" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-EXT-AZ:host_availability_zone="nova" OS-EXT-AZ:availability_zone="None" OS-DCF:diskConfig="AUTO">
+<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="2013-02-07T19:01:59Z" hostId="641edaad8dd6a670afec58a4ce7e908d50379a6060f845236cd063db" name="new-server-test" created="2013-02-07T19:01:58Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="b45edf9d-30f6-41e8-a00b-ef8962376586" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="f7954cfa4a5544278876b1d9224efe48" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-EXT-AZ:availability_zone="nova" OS-DCF:diskConfig="AUTO">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -12,13 +12,13 @@
</metadata>
<addresses>
<network id="private">
- <ip version="4" addr="192.168.0.3"/>
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/03665c18-c1f1-4eb9-83a8-da00a2c1d552" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/03665c18-c1f1-4eb9-83a8-da00a2c1d552" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/b45edf9d-30f6-41e8-a00b-ef8962376586" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/b45edf9d-30f6-41e8-a00b-ef8962376586" rel="bookmark"/>
<security_groups>
<security_group name="default"/>
</security_groups>
</server>
-</servers> \ No newline at end of file
+</servers>
diff --git a/doc/api_samples/os-config-drive/server-config-drive-get-resp.json b/doc/api_samples/os-config-drive/server-config-drive-get-resp.json
new file mode 100644
index 000000000..8745b3d62
--- /dev/null
+++ b/doc/api_samples/os-config-drive/server-config-drive-get-resp.json
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2013-02-04T13:17:50Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "8725fb615b191d8249a40f3e90d1efde88d914412e4edb2719176afd",
+ "id": "dd3b0715-a3fc-43d8-bbd2-2720beb226fb",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/dd3b0715-a3fc-43d8-bbd2-2720beb226fb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/dd3b0715-a3fc-43d8-bbd2-2720beb226fb",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-02-04T13:17:51Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-config-drive/server-config-drive-get-resp.xml b/doc/api_samples/os-config-drive/server-config-drive-get-resp.xml
new file mode 100644
index 000000000..6bb4c5e7c
--- /dev/null
+++ b/doc/api_samples/os-config-drive/server-config-drive-get-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-02-04T13:26:10Z" hostId="7a8c3fc15db5d6227d26d5ef559b77c880bbe99da5ce5f5871fc113e" name="new-server-test" created="2013-02-04T13:26:09Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="3b9e0572-3d7b-4e6f-9c21-35ad0f7dbf95" config_drive="">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/3b9e0572-3d7b-4e6f-9c21-35ad0f7dbf95" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/3b9e0572-3d7b-4e6f-9c21-35ad0f7dbf95" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-config-drive/server-post-req.json b/doc/api_samples/os-config-drive/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-config-drive/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-config-drive/server-post-req.xml b/doc/api_samples/os-config-drive/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-config-drive/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-config-drive/server-post-resp.json b/doc/api_samples/os-config-drive/server-post-resp.json
new file mode 100644
index 000000000..231f5b9f4
--- /dev/null
+++ b/doc/api_samples/os-config-drive/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "am5LKVsBVQ4s",
+ "id": "58da039c-dc81-4d8f-8688-a2f819e2f750",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/58da039c-dc81-4d8f-8688-a2f819e2f750",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/58da039c-dc81-4d8f-8688-a2f819e2f750",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-config-drive/server-post-resp.xml b/doc/api_samples/os-config-drive/server-post-resp.xml
new file mode 100644
index 000000000..3cbb199ce
--- /dev/null
+++ b/doc/api_samples/os-config-drive/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="00bba779-f8ae-403e-901d-1af18bcb9187" adminPass="XJqvFkH62TZh">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/00bba779-f8ae-403e-901d-1af18bcb9187" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/00bba779-f8ae-403e-901d-1af18bcb9187" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-config-drive/servers-config-drive-details-resp.json b/doc/api_samples/os-config-drive/servers-config-drive-details-resp.json
new file mode 100644
index 000000000..1373721ef
--- /dev/null
+++ b/doc/api_samples/os-config-drive/servers-config-drive-details-resp.json
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "",
+ "created": "2013-02-04T13:21:44Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "76e154b0015e25fad65a7ab0c35a86dd79acfa8312075a6534ef6176",
+ "id": "720e688f-5ec8-4d4f-b585-dbd1a89ceeb0",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/720e688f-5ec8-4d4f-b585-dbd1a89ceeb0",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/720e688f-5ec8-4d4f-b585-dbd1a89ceeb0",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-02-04T13:21:44Z",
+ "user_id": "fake"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-config-drive/servers-config-drive-details-resp.xml b/doc/api_samples/os-config-drive/servers-config-drive-details-resp.xml
new file mode 100644
index 000000000..6de687a91
--- /dev/null
+++ b/doc/api_samples/os-config-drive/servers-config-drive-details-resp.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="2013-02-04T13:26:27Z" hostId="2a00edcff768661880eb9c96c951f56c2c5dcd873bb652361008efc7" name="new-server-test" created="2013-02-04T13:26:27Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="515d94d3-aee4-4bd5-bb4e-9601c657372f" config_drive="">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/515d94d3-aee4-4bd5-bb4e-9601c657372f" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/515d94d3-aee4-4bd5-bb4e-9601c657372f" rel="bookmark"/>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json
new file mode 100644
index 000000000..934ed21b2
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json
@@ -0,0 +1,7 @@
+{
+    "dns_entry" :
+    {
+        "ip": "192.168.53.11",
+        "dns_type": "A"
+    }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml
new file mode 100644
index 000000000..36fa41120
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry>
+ <ip>192.168.53.11</ip>
+ <dns_type>A</dns_type>
+</dns_entry> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json
new file mode 100644
index 000000000..70c903886
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "domain1.example.org",
+ "id": null,
+ "ip": "192.168.1.1",
+ "name": "instance1",
+ "type": "A"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml
new file mode 100644
index 000000000..25afe599a
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry ip="192.168.1.1" domain="domain1.example.org" type="A" id="None" name="instance1"/> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json
new file mode 100644
index 000000000..05aac422d
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json
@@ -0,0 +1,8 @@
+{
+    "domain_entry" :
+    {
+        "domain": "domain1.example.org",
+        "scope": "public",
+        "project": "project1"
+    }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml
new file mode 100644
index 000000000..df12b61cf
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry>
+ <domain>domain1.example.org</domain>
+ <scope>public</scope>
+ <project>project1</project>
+</domain_entry> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json
new file mode 100644
index 000000000..db43e92d3
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json
@@ -0,0 +1,8 @@
+{
+ "domain_entry": {
+ "availability_zone": null,
+ "domain": "domain1.example.org",
+ "project": "project1",
+ "scope": "public"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml
new file mode 100644
index 000000000..8bdf42c41
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry project="project1" scope="public" domain="domain1.example.org" availability_zone="None"/> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json
new file mode 100644
index 000000000..84ee3930a
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "domain1.example.org",
+ "id": null,
+ "ip": "192.168.1.1",
+ "name": "instance1",
+ "type": null
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml
new file mode 100644
index 000000000..441b977e6
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry ip="192.168.1.1" domain="domain1.example.org" type="None" id="None" name="instance1"/> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json
new file mode 100644
index 000000000..3263de43a
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json
@@ -0,0 +1,11 @@
+{
+ "dns_entries": [
+ {
+ "domain": "domain1.example.org",
+ "id": null,
+ "ip": "192.168.1.1",
+ "name": "instance1",
+ "type": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml
new file mode 100644
index 000000000..8d910e09b
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entries>
+ <dns_entry ip="192.168.1.1" domain="domain1.example.org" type="None" id="None" name="instance1"/>
+</dns_entries> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json
new file mode 100644
index 000000000..8882c23a3
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json
@@ -0,0 +1,10 @@
+{
+ "domain_entries": [
+ {
+ "availability_zone": null,
+ "domain": "domain1.example.org",
+ "project": "project1",
+ "scope": "public"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml
new file mode 100644
index 000000000..d874ef058
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entries>
+ <domain_entry project="project1" scope="public" domain="domain1.example.org" availability_zone="None"/>
+</domain_entries> \ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/instance-action-get-resp.json b/doc/api_samples/os-instance-actions/instance-action-get-resp.json
new file mode 100644
index 000000000..d5a2ff96c
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/instance-action-get-resp.json
@@ -0,0 +1,27 @@
+{
+ "instanceAction": {
+ "action": "reboot",
+ "events": [
+ {
+ "event": "schedule",
+ "finish_time": "2012-12-05 01:02:00.000000",
+ "result": "Success",
+ "start_time": "2012-12-05 01:00:02.000000",
+ "traceback": ""
+ },
+ {
+ "event": "compute_create",
+ "finish_time": "2012-12-05 01:04:00.000000",
+ "result": "Success",
+ "start_time": "2012-12-05 01:03:00.000000",
+ "traceback": ""
+ }
+ ],
+ "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13",
+ "message": "",
+ "project_id": "147",
+ "request_id": "req-3293a3f1-b44c-4609-b8d2-d81b105636b8",
+ "start_time": "2012-12-05 00:00:00.000000",
+ "user_id": "789"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/instance-action-get-resp.xml b/doc/api_samples/os-instance-actions/instance-action-get-resp.xml
new file mode 100644
index 000000000..720cdd39a
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/instance-action-get-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instanceAction instance_uuid="b48316c5-71e8-45e4-9884-6c78055b9b13" user_id="789" start_time="2012-12-05 00:00:00.000000" request_id="req-3293a3f1-b44c-4609-b8d2-d81b105636b8" action="reboot" message="" project_id="147">
+ <events finish_time="2012-12-05 01:02:00.000000" start_time="2012-12-05 01:00:02.000000" traceback="" event="schedule" result="Success"/>
+ <events finish_time="2012-12-05 01:04:00.000000" start_time="2012-12-05 01:03:00.000000" traceback="" event="compute_create" result="Success"/>
+</instanceAction> \ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/instance-actions-list-resp.json b/doc/api_samples/os-instance-actions/instance-actions-list-resp.json
new file mode 100644
index 000000000..22d29d076
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/instance-actions-list-resp.json
@@ -0,0 +1,22 @@
+{
+ "instanceActions": [
+ {
+ "action": "resize",
+ "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13",
+ "message": "",
+ "project_id": "842",
+ "request_id": "req-25517360-b757-47d3-be45-0e8d2a01b36a",
+ "start_time": "2012-12-05 01:00:00.000000",
+ "user_id": "789"
+ },
+ {
+ "action": "reboot",
+ "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13",
+ "message": "",
+ "project_id": "147",
+ "request_id": "req-3293a3f1-b44c-4609-b8d2-d81b105636b8",
+ "start_time": "2012-12-05 00:00:00.000000",
+ "user_id": "789"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-instance-actions/instance-actions-list-resp.xml b/doc/api_samples/os-instance-actions/instance-actions-list-resp.xml
new file mode 100644
index 000000000..33896df91
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/instance-actions-list-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instanceActions>
+ <instanceAction instance_uuid="b48316c5-71e8-45e4-9884-6c78055b9b13" user_id="789" start_time="2012-12-05 01:00:00.000000" request_id="req-25517360-b757-47d3-be45-0e8d2a01b36a" action="resize" message="" project_id="842"/>
+ <instanceAction instance_uuid="b48316c5-71e8-45e4-9884-6c78055b9b13" user_id="789" start_time="2012-12-05 00:00:00.000000" request_id="req-3293a3f1-b44c-4609-b8d2-d81b105636b8" action="reboot" message="" project_id="147"/>
+</instanceActions> \ No newline at end of file
diff --git a/doc/api_samples/os-services/service-disable-put-req.json b/doc/api_samples/os-services/service-disable-put-req.json
new file mode 100644
index 000000000..d11afaed9
--- /dev/null
+++ b/doc/api_samples/os-services/service-disable-put-req.json
@@ -0,0 +1,4 @@
+{
+ "host": "host1",
+ "service": "nova-compute"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-services/service-disable-put-resp.json b/doc/api_samples/os-services/service-disable-put-resp.json
new file mode 100644
index 000000000..2e461bc6d
--- /dev/null
+++ b/doc/api_samples/os-services/service-disable-put-resp.json
@@ -0,0 +1,5 @@
+{
+ "disabled": true,
+ "host": "host1",
+ "service": "nova-compute"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-services/service-enable-put-req.json b/doc/api_samples/os-services/service-enable-put-req.json
new file mode 100644
index 000000000..d11afaed9
--- /dev/null
+++ b/doc/api_samples/os-services/service-enable-put-req.json
@@ -0,0 +1,4 @@
+{
+ "host": "host1",
+ "service": "nova-compute"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-services/service-enable-put-resp.json b/doc/api_samples/os-services/service-enable-put-resp.json
new file mode 100644
index 000000000..88b9dc7f9
--- /dev/null
+++ b/doc/api_samples/os-services/service-enable-put-resp.json
@@ -0,0 +1,5 @@
+{
+ "disabled": false,
+ "host": "host1",
+ "service": "nova-compute"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-services/services-list-get-resp.json b/doc/api_samples/os-services/services-list-get-resp.json
new file mode 100644
index 000000000..dcda1a46e
--- /dev/null
+++ b/doc/api_samples/os-services/services-list-get-resp.json
@@ -0,0 +1,36 @@
+{
+ "services": [
+ {
+ "binary": "nova-scheduler",
+ "host": "host1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "2012-10-29T13:42:02.000000",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "2012-10-29T13:42:05.000000",
+ "zone": "nova"
+ },
+ {
+ "binary": "nova-scheduler",
+ "host": "host2",
+ "state": "down",
+ "status": "enabled",
+ "updated_at": "2012-09-19T06:55:34.000000",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host2",
+ "state": "down",
+ "status": "disabled",
+ "updated_at": "2012-09-18T08:03:38.000000",
+ "zone": "nova"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/source/_ga/layout.html b/doc/source/_ga/layout.html
index f29e90968..add67fa7c 100644
--- a/doc/source/_ga/layout.html
+++ b/doc/source/_ga/layout.html
@@ -8,9 +8,9 @@ document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.
</script>
<script type="text/javascript">
try {
-var pageTracker = _gat._getTracker("UA-17511903-3");
-pageTracker._setDomainName("none");
-pageTracker._setAllowLinker(true);
+//Tracking docs.openstack.org/developer/nova only
+var pageTracker = _gat._getTracker("UA-17511903-1");
+pageTracker._setCookiePath("/developer/nova");
pageTracker._trackPageview();
} catch(err) {}</script>
{% endblock %}
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 0bdaeb08e..9af5f5494 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -64,12 +64,11 @@ copyright = u'2010-present, OpenStack, LLC'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
-from nova import version as nova_version
-#import nova.version
+from nova.version import version_info
# The full version, including alpha/beta/rc tags.
-release = nova_version.version_string()
+release = version_info.release_string()
# The short X.Y version.
-version = nova_version.canonical_version_string()
+version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index 63ed95c82..31dcfde77 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -91,6 +91,8 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
* |TypeAffinityFilter| - Only passes hosts that are not already running an
instance of the requested type.
* |AggregateTypeAffinityFilter| - limits instance_type by aggregate.
+* |GroupAntiAffinityFilter| - ensures that each instance in group is on a
+ different host.
Now we can focus on these standard filter classes in details. I will pass the
simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter| are,
@@ -163,6 +165,10 @@ of the set of instances uses.
the network address of the current host is in the same sub network as it was
defined in the request.
+|GroupAntiAffinityFilter| its method `host_passes` returns `True` if host to
+place the instance on is not in a group of hosts. The group of hosts is
+maintained by a group name. The scheduler hint contains the group name.
+
|JsonFilter| - this filter provides the opportunity to write complicated
queries for the hosts capabilities filtering, based on simple JSON-like syntax.
There can be used the following operations for the host states properties:
@@ -336,6 +342,7 @@ in :mod:`nova.tests.scheduler`.
.. |JsonFilter| replace:: :class:`JsonFilter <nova.scheduler.filters.json_filter.JsonFilter>`
.. |RamFilter| replace:: :class:`RamFilter <nova.scheduler.filters.ram_filter.RamFilter>`
.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter <nova.scheduler.filters.affinity_filter.SimpleCIDRAffinityFilter>`
+.. |GroupAntiAffinityFilter| replace:: :class:`GroupAntiAffinityFilter <nova.scheduler.filters.affinity_filter.GroupAntiAffinityFilter>`
.. |DifferentHostFilter| replace:: :class:`DifferentHostFilter <nova.scheduler.filters.affinity_filter.DifferentHostFilter>`
.. |SameHostFilter| replace:: :class:`SameHostFilter <nova.scheduler.filters.affinity_filter.SameHostFilter>`
.. |RetryFilter| replace:: :class:`RetryFilter <nova.scheduler.filters.retry_filter.RetryFilter>`
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 923c4a528..2d3c4ed06 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -44,6 +44,7 @@
"compute_extension:extended_server_attributes": "rule:admin_api",
"compute_extension:extended_status": "",
"compute_extension:extended_availability_zone": "",
+ "compute_extension:extended_ips": "",
"compute_extension:fixed_ips": "rule:admin_api",
"compute_extension:flavor_access": "",
"compute_extension:flavor_disabled": "",
@@ -65,6 +66,8 @@
"compute_extension:hide_server_addresses": "is_admin:False",
"compute_extension:hosts": "rule:admin_api",
"compute_extension:hypervisors": "rule:admin_api",
+ "compute_extension:instance_actions": "",
+ "compute_extension:instance_actions:events": "rule:admin_api",
"compute_extension:instance_usage_audit_log": "rule:admin_api",
"compute_extension:keypairs": "",
"compute_extension:multinic": "",
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index 9562a23aa..6396315b9 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -88,6 +88,11 @@ dd: CommandFilter, /bin/dd, root
# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
iscsiadm: CommandFilter, iscsiadm, root
+# nova/virt/libvirt/volume.py: 'aoe-revalidate', aoedev
+# nova/virt/libvirt/volume.py: 'aoe-discover'
+aoe-revalidate: CommandFilter, /usr/sbin/aoe-revalidate, root
+aoe-discover: CommandFilter, /usr/sbin/aoe-discover, root
+
# nova/virt/xenapi/vm_utils.py: parted, --script, ...
# nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
parted: CommandFilter, parted, root
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index bc47b3e0d..bb0f7245a 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -115,10 +115,10 @@ def get_ip_info_for_instance(context, instance):
return get_ip_info_for_instance_from_nw_info(nw_info)
-def get_availability_zone_by_host(services, host):
+def get_availability_zone_by_host(services, host, conductor_api=None):
if len(services) > 0:
return availability_zones.get_host_availability_zone(
- context.get_admin_context(), host)
+ context.get_admin_context(), host, conductor_api)
return 'unknown zone'
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 34d412268..fca580b29 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -26,8 +26,8 @@ import posixpath
from nova.api.ec2 import ec2utils
from nova.api.metadata import password
from nova import block_device
+from nova import conductor
from nova import context
-from nova import db
from nova import network
from nova.openstack.common import cfg
from nova.openstack.common import timeutils
@@ -83,7 +83,8 @@ class InvalidMetadataPath(Exception):
class InstanceMetadata():
"""Instance metadata."""
- def __init__(self, instance, address=None, content=[], extra_md=None):
+ def __init__(self, instance, address=None, content=[], extra_md=None,
+ conductor_api=None):
"""Creation of this object should basically cover all time consuming
collection. Methods after that should not cause time delays due to
network operations or lengthy cpu operations.
@@ -95,39 +96,32 @@ class InstanceMetadata():
self.instance = instance
self.extra_md = extra_md
+ if conductor_api:
+ self.conductor_api = conductor_api
+ else:
+ self.conductor_api = conductor.API()
+
ctxt = context.get_admin_context()
- services = db.service_get_all_by_host(ctxt.elevated(),
- instance['host'])
+ capi = self.conductor_api
+ services = capi.service_get_all_by_host(ctxt.elevated(),
+ instance['host'])
self.availability_zone = ec2utils.get_availability_zone_by_host(
- services, instance['host'])
+ services, instance['host'], capi)
self.ip_info = ec2utils.get_ip_info_for_instance(ctxt, instance)
- self.security_groups = db.security_group_get_by_instance(ctxt,
- instance['id'])
+ self.security_groups = capi.security_group_get_by_instance(ctxt,
+ instance)
- self.mappings = _format_instance_mapping(ctxt, instance)
+ self.mappings = _format_instance_mapping(capi, ctxt, instance)
if instance.get('user_data', None) is not None:
self.userdata_raw = base64.b64decode(instance['user_data'])
else:
self.userdata_raw = None
- self.ec2_ids = {}
-
- self.ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(
- instance['uuid'])
- self.ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(ctxt,
- instance['image_ref'])
-
- for image_type in ['kernel', 'ramdisk']:
- if self.instance.get('%s_id' % image_type):
- image_id = self.instance['%s_id' % image_type]
- ec2_image_type = ec2utils.image_type(image_type)
- ec2_id = ec2utils.glance_id_to_ec2_id(ctxt, image_id,
- ec2_image_type)
- self.ec2_ids['%s-id' % image_type] = ec2_id
+ self.ec2_ids = capi.get_ec2_ids(ctxt, instance)
self.address = address
@@ -145,7 +139,8 @@ class InstanceMetadata():
# get network info, and the rendered network template
ctxt = context.get_admin_context()
- network_info = network.API().get_instance_nw_info(ctxt, instance)
+ network_info = network.API().get_instance_nw_info(ctxt, instance,
+ conductor_api=capi)
self.network_config = None
cfg = netutils.get_injected_network_template(network_info)
@@ -404,23 +399,26 @@ class InstanceMetadata():
yield ('%s/%s/%s' % ("openstack", CONTENT_DIR, cid), content)
-def get_metadata_by_address(address):
+def get_metadata_by_address(conductor_api, address):
ctxt = context.get_admin_context()
fixed_ip = network.API().get_fixed_ip_by_address(ctxt, address)
- return get_metadata_by_instance_id(fixed_ip['instance_uuid'],
+ return get_metadata_by_instance_id(conductor_api,
+ fixed_ip['instance_uuid'],
address,
ctxt)
-def get_metadata_by_instance_id(instance_id, address, ctxt=None):
+def get_metadata_by_instance_id(conductor_api, instance_id, address,
+ ctxt=None):
ctxt = ctxt or context.get_admin_context()
- instance = db.instance_get_by_uuid(ctxt, instance_id)
+ instance = conductor_api.instance_get_by_uuid(ctxt, instance_id)
return InstanceMetadata(instance, address)
-def _format_instance_mapping(ctxt, instance):
- bdms = db.block_device_mapping_get_all_by_instance(ctxt, instance['uuid'])
+def _format_instance_mapping(conductor_api, ctxt, instance):
+ bdms = conductor_api.block_device_mapping_get_all_by_instance(
+ ctxt, instance)
return block_device.instance_block_mapping(instance, bdms)
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 4a425f876..cedb27370 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -26,6 +26,7 @@ import webob.exc
from nova.api.metadata import base
from nova.common import memorycache
+from nova import conductor
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -58,6 +59,7 @@ class MetadataRequestHandler(wsgi.Application):
def __init__(self):
self._cache = memorycache.get_client()
+ self.conductor_api = conductor.API()
def get_metadata_by_remote_address(self, address):
if not address:
@@ -69,7 +71,7 @@ class MetadataRequestHandler(wsgi.Application):
return data
try:
- data = base.get_metadata_by_address(address)
+ data = base.get_metadata_by_address(self.conductor_api, address)
except exception.NotFound:
return None
@@ -84,7 +86,8 @@ class MetadataRequestHandler(wsgi.Application):
return data
try:
- data = base.get_metadata_by_instance_id(instance_id, address)
+ data = base.get_metadata_by_instance_id(self.conductor_api,
+ instance_id, address)
except exception.NotFound:
return None
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index 1c053ea59..dc3ee8fc4 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -282,6 +282,8 @@ class AdminActionsController(wsgi.Controller):
instance = self.compute_api.get(context, id)
self.compute_api.live_migrate(context, instance, block_migration,
disk_over_commit, host)
+ except exception.ComputeServiceUnavailable as ex:
+ raise exc.HTTPBadRequest(explanation=str(ex))
except Exception:
msg = _("Live migration of instance %(id)s to host %(host)s"
" failed") % locals()
diff --git a/nova/api/openstack/compute/contrib/console_output.py b/nova/api/openstack/compute/contrib/console_output.py
index 4f01bbaf4..60594cb53 100644
--- a/nova/api/openstack/compute/contrib/console_output.py
+++ b/nova/api/openstack/compute/contrib/console_output.py
@@ -65,6 +65,8 @@ class ConsoleOutputController(wsgi.Controller):
length)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Unable to get console'))
+ except exception.InstanceNotReady as e:
+ raise webob.exc.HTTPConflict(explanation=unicode(e))
# XML output is not correctly escaped, so remove invalid characters
remove_re = re.compile('[\x00-\x08\x0B-\x0C\x0E-\x1F-\x0D]')
diff --git a/nova/api/openstack/compute/contrib/extended_availability_zone.py b/nova/api/openstack/compute/contrib/extended_availability_zone.py
index b7451cb6a..734ddf7c8 100644
--- a/nova/api/openstack/compute/contrib/extended_availability_zone.py
+++ b/nova/api/openstack/compute/contrib/extended_availability_zone.py
@@ -21,26 +21,35 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import availability_zones
+from nova.common import memorycache
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
+# NOTE(vish): azs don't change that often, so cache them for an hour to
+# avoid hitting the db multiple times on every request.
+AZ_CACHE_SECONDS = 60 * 60
authorize = extensions.soft_extension_authorizer('compute',
'extended_availability_zone')
class ExtendedAZController(wsgi.Controller):
+ def __init__(self):
+ self.mc = memorycache.get_client()
def _get_host_az(self, context, instance):
- admin_context = context.elevated()
- if instance['host']:
- return availability_zones.get_host_availability_zone(
- admin_context, instance['host'])
+ host = instance.get('host')
+ if not host:
+ return None
+ cache_key = "azcache-%s" % host
+ az = self.mc.get(cache_key)
+ if not az:
+ elevated = context.elevated()
+ az = availability_zones.get_host_availability_zone(elevated, host)
+ self.mc.set(cache_key, az, AZ_CACHE_SECONDS)
+ return az
def _extend_server(self, context, server, instance):
key = "%s:availability_zone" % Extended_availability_zone.alias
- server[key] = instance.get('availability_zone', None)
-
- key = "%s:host_availability_zone" % Extended_availability_zone.alias
server[key] = self._get_host_az(context, instance)
@wsgi.extends
@@ -81,10 +90,6 @@ class Extended_availability_zone(extensions.ExtensionDescriptor):
def make_server(elem):
elem.set('{%s}availability_zone' % Extended_availability_zone.namespace,
'%s:availability_zone' % Extended_availability_zone.alias)
- elem.set('{%s}host_availability_zone' %
- Extended_availability_zone.namespace,
- '%s:host_availability_zone' %
- Extended_availability_zone.alias)
class ExtendedAZTemplate(xmlutil.TemplateBuilder):
diff --git a/nova/api/openstack/compute/contrib/extended_ips.py b/nova/api/openstack/compute/contrib/extended_ips.py
new file mode 100644
index 000000000..0a3432a46
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/extended_ips.py
@@ -0,0 +1,111 @@
+# Copyright 2013 Nebula, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The Extended Ips API extension."""
+
+import itertools
+
+from nova.api.openstack import common
+from nova.api.openstack.compute import ips
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.soft_extension_authorizer('compute', 'extended_ips')
+
+
+class ExtendedIpsController(wsgi.Controller):
+ def __init__(self, *args, **kwargs):
+ super(ExtendedIpsController, self).__init__(*args, **kwargs)
+ self.compute_api = compute.API()
+
+ def _extend_server(self, context, server, instance):
+ key = "%s:type" % Extended_ips.alias
+ networks = common.get_networks_for_instance(context, instance)
+ for label, network in networks.items():
+ # NOTE(vish): ips are hidden in some states via the
+ # hide_server_addresses extension.
+ if label in server['addresses']:
+ all_ips = itertools.chain(network["ips"],
+ network["floating_ips"])
+ for i, ip in enumerate(all_ips):
+ server['addresses'][label][i][key] = ip['type']
+
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+ context = req.environ['nova.context']
+ if authorize(context):
+ # Attach our slave template to the response object
+ resp_obj.attach(xml=ExtendedIpsServerTemplate())
+ server = resp_obj.obj['server']
+ db_instance = req.get_db_instance(server['id'])
+ # server['id'] is guaranteed to be in the cache due to
+ # the core API adding it in its 'show' method.
+ self._extend_server(context, server, db_instance)
+
+ @wsgi.extends
+ def detail(self, req, resp_obj):
+ context = req.environ['nova.context']
+ if authorize(context):
+ # Attach our slave template to the response object
+ resp_obj.attach(xml=ExtendedIpsServersTemplate())
+ servers = list(resp_obj.obj['servers'])
+ for server in servers:
+ db_instance = req.get_db_instance(server['id'])
+ # server['id'] is guaranteed to be in the cache due to
+ # the core API adding it in its 'detail' method.
+ self._extend_server(context, server, db_instance)
+
+
+class Extended_ips(extensions.ExtensionDescriptor):
+ """Adds type parameter to the ip list."""
+
+ name = "ExtendedIps"
+ alias = "OS-EXT-IPS"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "extended_ips/api/v1.1")
+ updated = "2013-01-06T00:00:00+00:00"
+
+ def get_controller_extensions(self):
+ controller = ExtendedIpsController()
+ extension = extensions.ControllerExtension(self, 'servers', controller)
+ return [extension]
+
+
+def make_server(elem):
+ elem.append(ips.AddressesTemplate())
+ ip = elem['addresses']['network']['ip']
+ ip.set('{%s}type' % Extended_ips.namespace,
+ '%s:type' % Extended_ips.alias)
+
+
+class ExtendedIpsServerTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('server', selector='server')
+ elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
+ make_server(root)
+ return xmlutil.SlaveTemplate(root, 1, nsmap={
+ Extended_ips.alias: Extended_ips.namespace})
+
+
+class ExtendedIpsServersTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('servers')
+ elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
+ make_server(elem)
+ return xmlutil.SlaveTemplate(root, 1, nsmap={
+ Extended_ips.alias: Extended_ips.namespace})
diff --git a/nova/api/openstack/compute/contrib/instance_actions.py b/nova/api/openstack/compute/contrib/instance_actions.py
new file mode 100644
index 000000000..4ab32ad4c
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/instance_actions.py
@@ -0,0 +1,128 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import db
+
+authorize_actions = extensions.extension_authorizer('compute',
+ 'instance_actions')
+authorize_events = extensions.soft_extension_authorizer('compute',
+ 'instance_actions:events')
+
+ACTION_KEYS = ['action', 'instance_uuid', 'request_id', 'user_id',
+ 'project_id', 'start_time', 'message']
+EVENT_KEYS = ['event', 'start_time', 'finish_time', 'result', 'traceback']
+
+
+def make_actions(elem):
+ for key in ACTION_KEYS:
+ elem.set(key)
+
+
+def make_action(elem):
+ for key in ACTION_KEYS:
+ elem.set(key)
+ event = xmlutil.TemplateElement('events', selector='events')
+ for key in EVENT_KEYS:
+ event.set(key)
+ elem.append(event)
+
+
+class InstanceActionsTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('instanceActions')
+ elem = xmlutil.SubTemplateElement(root, 'instanceAction',
+ selector='instanceActions')
+ make_actions(elem)
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class InstanceActionTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('instanceAction',
+ selector='instanceAction')
+ make_action(root)
+ return xmlutil.MasterTemplate(root, 1)
+
+
+class InstanceActionsController(wsgi.Controller):
+
+ def __init__(self):
+ super(InstanceActionsController, self).__init__()
+ self.compute_api = compute.API()
+
+ def _format_action(self, action_raw):
+ action = {}
+ for key in ACTION_KEYS:
+ if key in action_raw:
+ action[key] = action_raw[key]
+ return action
+
+ def _format_event(self, event_raw):
+ event = {}
+ for key in EVENT_KEYS:
+ if key in event_raw:
+ event[key] = event_raw[key]
+ return event
+
+ @wsgi.serializers(xml=InstanceActionsTemplate)
+ def index(self, req, server_id):
+ """Returns the list of actions recorded for a given instance."""
+ context = req.environ["nova.context"]
+ instance = self.compute_api.get(context, server_id)
+ authorize_actions(context, target=instance)
+ actions_raw = db.actions_get(context, server_id)
+ actions = [self._format_action(action) for action in actions_raw]
+ return {'instanceActions': actions}
+
+ @wsgi.serializers(xml=InstanceActionTemplate)
+ def show(self, req, server_id, id):
+ """Return data about the given instance action."""
+ context = req.environ['nova.context']
+ instance = self.compute_api.get(context, server_id)
+ authorize_actions(context, target=instance)
+ action = db.action_get_by_request_id(context, server_id, id)
+ if action is None:
+ raise exc.HTTPNotFound()
+
+ action_id = action['id']
+ action = self._format_action(action)
+ if authorize_events(context):
+ events_raw = db.action_events_get(context, action_id)
+ action['events'] = [self._format_event(evt) for evt in events_raw]
+ return {'instanceAction': action}
+
+
+class Instance_actions(extensions.ExtensionDescriptor):
+ """View a log of actions and events taken on an instance."""
+
+ name = "InstanceActions"
+ alias = "os-instance-actions"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "instance-actions/api/v1.1")
+ updated = "2013-02-08T00:00:00+00:00"
+
+ def get_resources(self):
+ ext = extensions.ResourceExtension('os-instance-actions',
+ InstanceActionsController(),
+ parent=dict(
+ member_name='server',
+ collection_name='servers'))
+ return [ext]
diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py
index bdf82ea86..728c3fad6 100644
--- a/nova/api/openstack/compute/contrib/quotas.py
+++ b/nova/api/openstack/compute/contrib/quotas.py
@@ -23,10 +23,12 @@ from nova.api.openstack import xmlutil
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
+from nova.openstack.common import log as logging
from nova import quota
QUOTAS = quota.QUOTAS
+LOG = logging.getLogger(__name__)
authorize_update = extensions.extension_authorizer('compute', 'quotas:update')
@@ -88,7 +90,14 @@ class QuotaSetsController(object):
project_id = id
for key in body['quota_set'].keys():
if key in QUOTAS:
- value = int(body['quota_set'][key])
+ try:
+ value = int(body['quota_set'][key])
+ except (ValueError, TypeError):
+ LOG.warn(_("Quota for %s should be integer.") % key)
+ # NOTE(hzzhoushaoyu): Do not prevent valid value to be
+ # updated. If raise BadRequest, some may be updated and
+ # others may be not.
+ continue
self._validate_quota_limit(value)
try:
db.quota_update(context, project_id, key, value)
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index a15c395ae..c49e7af70 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -30,6 +30,7 @@ from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova.openstack.common import log as logging
+from nova.virt import netutils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
@@ -332,6 +333,12 @@ class SecurityGroupRulesController(SecurityGroupControllerBase):
values['parent_group_id'] = security_group.id
+ if 'cidr' in values:
+ net, prefixlen = netutils.get_net_and_prefixlen(values['cidr'])
+ if net != '0.0.0.0' and prefixlen == '0':
+ msg = _("Bad prefix for network in cidr %s") % values['cidr']
+ raise exc.HTTPBadRequest(explanation=msg)
+
if self.security_group_api.rule_exists(security_group, values):
msg = _('This rule already exists in group %s') % parent_group_id
raise exc.HTTPBadRequest(explanation=msg)
diff --git a/nova/api/openstack/compute/contrib/server_start_stop.py b/nova/api/openstack/compute/contrib/server_start_stop.py
index 733972083..a13aabb05 100644
--- a/nova/api/openstack/compute/contrib/server_start_stop.py
+++ b/nova/api/openstack/compute/contrib/server_start_stop.py
@@ -44,7 +44,10 @@ class ServerStartStopActionController(wsgi.Controller):
context = req.environ['nova.context']
instance = self._get_instance(context, id)
LOG.debug(_('start instance'), instance=instance)
- self.compute_api.start(context, instance)
+ try:
+ self.compute_api.start(context, instance)
+ except exception.InstanceNotReady as e:
+ raise webob.exc.HTTPConflict(explanation=unicode(e))
return webob.Response(status_int=202)
@wsgi.action('os-stop')
@@ -53,7 +56,10 @@ class ServerStartStopActionController(wsgi.Controller):
context = req.environ['nova.context']
instance = self._get_instance(context, id)
LOG.debug(_('stop instance'), instance=instance)
- self.compute_api.stop(context, instance)
+ try:
+ self.compute_api.stop(context, instance)
+ except exception.InstanceNotReady as e:
+ raise webob.exc.HTTPConflict(explanation=unicode(e))
return webob.Response(status_int=202)
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 723997eba..c10c6e1b3 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -563,18 +563,11 @@ class Controller(wsgi.Controller):
return instance
def _check_string_length(self, value, name, max_length=None):
- if not isinstance(value, basestring):
- msg = _("%s is not a string or unicode") % name
- raise exc.HTTPBadRequest(explanation=msg)
-
- if not value.strip():
- msg = _("%s is an empty string") % name
- raise exc.HTTPBadRequest(explanation=msg)
-
- if max_length and len(value) > max_length:
- msg = _("%(name)s can be at most %(max_length)s "
- "characters.") % locals()
- raise exc.HTTPBadRequest(explanation=msg)
+ try:
+ utils.check_string_length(value, name, min_length=1,
+ max_length=max_length)
+ except exception.InvalidInput as e:
+ raise exc.HTTPBadRequest(explanation=str(e))
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
@@ -1202,7 +1195,7 @@ class Controller(wsgi.Controller):
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
try:
- flavor_ref = body["resize"]["flavorRef"]
+ flavor_ref = str(body["resize"]["flavorRef"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
index 97faccc9f..8c9d4acf7 100644
--- a/nova/availability_zones.py
+++ b/nova/availability_zones.py
@@ -52,9 +52,13 @@ def set_availability_zones(context, services):
return services
-def get_host_availability_zone(context, host):
- metadata = db.aggregate_metadata_get_by_host(
- context, host, key='availability_zone')
+def get_host_availability_zone(context, host, conductor_api=None):
+ if conductor_api:
+ metadata = conductor_api.aggregate_metadata_get_by_host(
+ context, host, key='availability_zone')
+ else:
+ metadata = db.aggregate_metadata_get_by_host(
+ context, host, key='availability_zone')
if 'availability_zone' in metadata:
return list(metadata['availability_zone'])[0]
else:
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 43b87d6f2..5e160d2ef 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -131,6 +131,15 @@ def check_instance_state(vm_state=None, task_state=(None,)):
return outer
+def check_instance_host(function):
+ @functools.wraps(function)
+ def wrapped(self, context, instance, *args, **kwargs):
+ if not instance['host']:
+ raise exception.InstanceNotReady(instance_id=instance['uuid'])
+ return function(self, context, instance, *args, **kwargs)
+ return wrapped
+
+
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
@@ -1213,6 +1222,7 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
+ @check_instance_host
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR, vm_states.STOPPED],
task_state=[None])
@@ -1231,6 +1241,7 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
+ @check_instance_host
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
@@ -2135,11 +2146,9 @@ class API(base.Base):
file_contents=file_contents)
@wrap_check_policy
+ @check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
- if not instance['host']:
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
-
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
@@ -2149,20 +2158,17 @@ class API(base.Base):
return {'url': connect_info['access_url']}
+ @check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
- if not instance['host']:
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
+ @check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
- if not instance['host']:
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
-
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
@@ -2172,15 +2178,15 @@ class API(base.Base):
return {'url': connect_info['access_url']}
+ @check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
- if not instance['host']:
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
+ @check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
@@ -2238,7 +2244,7 @@ class API(base.Base):
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
- self.volume_api.check_attach(context, volume)
+ self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
@@ -2346,7 +2352,9 @@ class API(base.Base):
instance['uuid'])
def is_volume_backed_instance(self, context, instance, bdms):
- bdms = bdms or self.get_instance_bdms(context, instance)
+ if bdms is None:
+ bdms = self.get_instance_bdms(context, instance)
+
for bdm in bdms:
if (block_device.strip_dev(bdm.device_name) ==
block_device.strip_dev(instance['root_device_name'])):
@@ -2507,8 +2515,11 @@ class AggregateAPI(base.Base):
"""Creates the model for the aggregate."""
values = {"name": aggregate_name}
+ metadata = None
+ if availability_zone:
+ metadata = {'availability_zone': availability_zone}
aggregate = self.db.aggregate_create(context, values,
- metadata={'availability_zone': availability_zone})
+ metadata=metadata)
aggregate = self._get_aggregate_info(context, aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 50449df04..1e30331bc 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -524,7 +524,7 @@ class ComputeCellsAPI(compute_api.API):
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
- self.volume_api.check_attach(context, volume)
+ self.volume_api.check_attach(context, volume, instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 045a24d4d..4cc5d5d4e 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -86,6 +86,9 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
'rxtx_factor': rxtx_factor,
}
+ # ensure name do not exceed 255 characters
+ utils.check_string_length(name, 'name', min_length=1, max_length=255)
+
# ensure name does not contain any special characters
invalid_name = INVALID_NAME_REGEX.search(name)
if invalid_name:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 504d16da2..26df2ac9b 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -645,7 +645,8 @@ class ComputeManager(manager.SchedulerDependentManager):
if bdm['volume_id'] is not None:
volume = self.volume_api.get(context, bdm['volume_id'])
- self.volume_api.check_attach(context, volume)
+ self.volume_api.check_attach(context, volume,
+ instance=instance)
cinfo = self._attach_volume_boot(context,
instance,
volume,
@@ -1840,8 +1841,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
- self.network_api.migrate_instance_start(context, instance,
- migration)
+ self.conductor_api.network_migrate_instance_start(context,
+ instance,
+ migration)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
@@ -1920,8 +1922,9 @@ class ComputeManager(manager.SchedulerDependentManager):
instance['uuid'], launched_at=timeutils.utcnow(),
expected_task_state=task_states.RESIZE_REVERTING)
- self.network_api.migrate_instance_finish(context, instance,
- migration)
+ self.conductor_api.network_migrate_instance_finish(context,
+ instance,
+ migration)
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.ACTIVE, task_state=None)
@@ -1934,15 +1937,13 @@ class ComputeManager(manager.SchedulerDependentManager):
self._quota_commit(context, reservations)
- @staticmethod
- def _quota_commit(context, reservations):
+ def _quota_commit(self, context, reservations):
if reservations:
- QUOTAS.commit(context, reservations)
+ self.conductor_api.quota_commit(context, reservations)
- @staticmethod
- def _quota_rollback(context, reservations):
+ def _quota_rollback(self, context, reservations):
if reservations:
- QUOTAS.rollback(context, reservations)
+ self.conductor_api.quota_rollback(context, reservations)
def _prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node):
@@ -2096,8 +2097,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self._terminate_volume_connections(context, instance)
- self.network_api.migrate_instance_start(context, instance,
- migration)
+ self.conductor_api.network_migrate_instance_start(context,
+ instance,
+ migration)
migration = self.conductor_api.migration_update(context,
migration, 'post-migrating')
@@ -2156,8 +2158,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
- self.network_api.migrate_instance_finish(context, instance,
- migration)
+ self.conductor_api.network_migrate_instance_finish(context,
+ instance,
+ migration)
network_info = self._get_instance_nw_info(context, instance)
@@ -2727,9 +2730,12 @@ class ComputeManager(manager.SchedulerDependentManager):
Returns a dict values required for live migration without shared
storage.
"""
+ capi = self.conductor_api
+ bdms = capi.block_device_mapping_get_all_by_instance(ctxt, instance)
+
is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
instance,
- None)
+ bdms)
dest_check_data['is_volume_backed'] = is_volume_backed
return self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data)
@@ -2864,7 +2870,9 @@ class ComputeManager(manager.SchedulerDependentManager):
migration = {'source_compute': self.host,
'dest_compute': dest, }
- self.network_api.migrate_instance_start(ctxt, instance_ref, migration)
+ self.conductor_api.network_migrate_instance_start(ctxt,
+ instance_ref,
+ migration)
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
@@ -2919,7 +2927,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.host)
migration = {'source_compute': instance['host'],
'dest_compute': self.host, }
- self.network_api.migrate_instance_finish(context, instance, migration)
+ self.conductor_api.network_migrate_instance_finish(context,
+ instance,
+ migration)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
@@ -3430,7 +3440,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
- self.compute_api.stop(context, db_instance)
+ self.conductor_api.compute_stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
@@ -3443,7 +3453,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.warn(_("Instance is suspended unexpectedly. Calling "
"the stop API."), instance=db_instance)
try:
- self.compute_api.stop(context, db_instance)
+ self.conductor_api.compute_stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
@@ -3473,7 +3483,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
# Note(maoy): this assumes that the stop API is
# idempotent.
- self.compute_api.stop(context, db_instance)
+ self.conductor_api.compute_stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
"sync_power_state."),
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 3b193fff8..9026eb8a2 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -154,6 +154,12 @@ class LocalAPI(object):
aggregate,
key)
+ def aggregate_metadata_get_by_host(self, context, host,
+ key='availability_zone'):
+ return self._manager.aggregate_metadata_get_by_host(context,
+ host,
+ key)
+
def bw_usage_get(self, context, uuid, start_period, mac):
return self._manager.bw_usage_update(context, uuid, mac, start_period)
@@ -303,6 +309,28 @@ class LocalAPI(object):
return self._manager.security_groups_trigger_members_refresh(context,
group_ids)
+ def network_migrate_instance_start(self, context, instance, migration):
+ return self._manager.network_migrate_instance_start(context,
+ instance,
+ migration)
+
+ def network_migrate_instance_finish(self, context, instance, migration):
+ return self._manager.network_migrate_instance_finish(context,
+ instance,
+ migration)
+
+ def quota_commit(self, context, reservations):
+ return self._manager.quota_commit(context, reservations)
+
+ def quota_rollback(self, context, reservations):
+ return self._manager.quota_rollback(context, reservations)
+
+ def get_ec2_ids(self, context, instance):
+ return self._manager.get_ec2_ids(context, instance)
+
+ def compute_stop(self, context, instance, do_cast=True):
+ return self._manager.compute_stop(context, instance, do_cast)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager."""
@@ -447,6 +475,12 @@ class API(object):
aggregate,
key)
+ def aggregate_metadata_get_by_host(self, context, host,
+ key='availability_zone'):
+ return self.conductor_rpcapi.aggregate_metadata_get_by_host(context,
+ host,
+ key)
+
def bw_usage_get(self, context, uuid, start_period, mac):
return self.conductor_rpcapi.bw_usage_update(context, uuid, mac,
start_period)
@@ -604,3 +638,25 @@ class API(object):
def security_groups_trigger_members_refresh(self, context, group_ids):
return self.conductor_rpcapi.security_groups_trigger_members_refresh(
context, group_ids)
+
+ def network_migrate_instance_start(self, context, instance, migration):
+ return self.conductor_rpcapi.network_migrate_instance_start(context,
+ instance,
+ migration)
+
+ def network_migrate_instance_finish(self, context, instance, migration):
+ return self.conductor_rpcapi.network_migrate_instance_finish(context,
+ instance,
+ migration)
+
+ def quota_commit(self, context, reservations):
+ return self.conductor_rpcapi.quota_commit(context, reservations)
+
+ def quota_rollback(self, context, reservations):
+ return self.conductor_rpcapi.quota_rollback(context, reservations)
+
+ def get_ec2_ids(self, context, instance):
+ return self.conductor_rpcapi.get_ec2_ids(context, instance)
+
+ def compute_stop(self, context, instance, do_cast=True):
+ return self.conductor_rpcapi.compute_stop(context, instance, do_cast)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 0d2031a4a..a986b0415 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -14,16 +14,18 @@
"""Handles database requests from other nova services."""
+from nova.api.ec2 import ec2utils
from nova.compute import api as compute_api
from nova.compute import utils as compute_utils
from nova import exception
from nova import manager
+from nova import network
from nova import notifications
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
-
+from nova import quota
LOG = logging.getLogger(__name__)
@@ -46,12 +48,30 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD."""
- RPC_API_VERSION = '1.40'
+ RPC_API_VERSION = '1.43'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.security_group_api = compute_api.SecurityGroupAPI()
+ self._network_api = None
+ self._compute_api = None
+ self.quotas = quota.QUOTAS
+
+ @property
+ def network_api(self):
+ # NOTE(danms): We need to instantiate our network_api on first use
+ # to avoid the circular dependency that exists between our init
+ # and network_api's
+ if self._network_api is None:
+ self._network_api = network.API()
+ return self._network_api
+
+ @property
+ def compute_api(self):
+ if self._compute_api is None:
+ self._compute_api = compute_api.API()
+ return self._compute_api
def ping(self, context, arg):
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
@@ -163,6 +183,11 @@ class ConductorManager(manager.SchedulerDependentManager):
self.db.aggregate_metadata_delete(context.elevated(),
aggregate['id'], key)
+ def aggregate_metadata_get_by_host(self, context, host,
+ key='availability_zone'):
+ result = self.db.aggregate_metadata_get_by_host(context, host, key)
+ return jsonutils.to_primitive(result)
+
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
@@ -360,3 +385,34 @@ class ConductorManager(manager.SchedulerDependentManager):
def security_groups_trigger_members_refresh(self, context, group_ids):
self.security_group_api.trigger_members_refresh(context, group_ids)
+
+ def network_migrate_instance_start(self, context, instance, migration):
+ self.network_api.migrate_instance_start(context, instance, migration)
+
+ def network_migrate_instance_finish(self, context, instance, migration):
+ self.network_api.migrate_instance_finish(context, instance, migration)
+
+ def quota_commit(self, context, reservations):
+ quota.QUOTAS.commit(context, reservations)
+
+ def quota_rollback(self, context, reservations):
+ quota.QUOTAS.rollback(context, reservations)
+
+ def get_ec2_ids(self, context, instance):
+ ec2_ids = {}
+
+ ec2_ids['instance-id'] = ec2utils.id_to_ec2_inst_id(instance['uuid'])
+ ec2_ids['ami-id'] = ec2utils.glance_id_to_ec2_id(context,
+ instance['image_ref'])
+ for image_type in ['kernel', 'ramdisk']:
+ if '%s_id' % image_type in instance:
+ image_id = instance['%s_id' % image_type]
+ ec2_image_type = ec2utils.image_type(image_type)
+ ec2_id = ec2utils.glance_id_to_ec2_id(context, image_id,
+ ec2_image_type)
+ ec2_ids['%s-id' % image_type] = ec2_id
+
+ return ec2_ids
+
+ def compute_stop(self, context, instance, do_cast=True):
+ self.compute_api.stop(context, instance, do_cast)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index f5ce0b4cb..b82f2b8e1 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -75,6 +75,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.40 - Added security_groups_trigger_handler and
security_groups_trigger_members_refresh
Remove instance_get_active_by_window
+ 1.41 - Added fixed_ip_get_by_instance, network_get,
+ instance_floating_address_get_all, quota_commit,
+ quota_rollback
+ 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
+ 1.43 - Added compute_stop
"""
BASE_RPC_API_VERSION = '1.0'
@@ -173,6 +178,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
key=key)
return self.call(context, msg, version='1.7')
+ def aggregate_metadata_get_by_host(self, context, host, key):
+ msg = self.make_msg('aggregate_metadata_get_by_host', host=host,
+ key=key)
+ return self.call(context, msg, version='1.42')
+
def bw_usage_update(self, context, uuid, mac, start_period,
bw_in=None, bw_out=None,
last_ctr_in=None, last_ctr_out=None,
@@ -382,3 +392,38 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
msg = self.make_msg('security_groups_trigger_members_refresh',
group_ids=group_ids)
return self.call(context, msg, version='1.40')
+
+ def network_migrate_instance_start(self, context, instance, migration):
+ instance_p = jsonutils.to_primitive(instance)
+ migration_p = jsonutils.to_primitive(migration)
+ msg = self.make_msg('network_migrate_instance_start',
+ instance=instance_p, migration=migration_p)
+ return self.call(context, msg, version='1.41')
+
+ def network_migrate_instance_finish(self, context, instance, migration):
+ instance_p = jsonutils.to_primitive(instance)
+ migration_p = jsonutils.to_primitive(migration)
+ msg = self.make_msg('network_migrate_instance_finish',
+ instance=instance_p, migration=migration_p)
+ return self.call(context, msg, version='1.41')
+
+ def quota_commit(self, context, reservations):
+ reservations_p = jsonutils.to_primitive(reservations)
+ msg = self.make_msg('quota_commit', reservations=reservations_p)
+ return self.call(context, msg, version='1.41')
+
+ def quota_rollback(self, context, reservations):
+ reservations_p = jsonutils.to_primitive(reservations)
+ msg = self.make_msg('quota_rollback', reservations=reservations_p)
+ return self.call(context, msg, version='1.41')
+
+ def get_ec2_ids(self, context, instance):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('get_ec2_ids', instance=instance_p)
+ return self.call(context, msg, version='1.42')
+
+ def compute_stop(self, context, instance, do_cast=True):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('compute_stop', instance=instance_p,
+ do_cast=do_cast)
+ return self.call(context, msg, version='1.43')
diff --git a/nova/context.py b/nova/context.py
index 8731e012d..60fd5b4c0 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -65,9 +65,6 @@ class RequestContext(object):
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
- self.is_admin = is_admin
- if self.is_admin is None:
- self.is_admin = policy.check_is_admin(self.roles)
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
@@ -90,7 +87,9 @@ class RequestContext(object):
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
-
+ self.is_admin = is_admin
+ if self.is_admin is None:
+ self.is_admin = policy.check_is_admin(self)
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
diff --git a/nova/crypto.py b/nova/crypto.py
index 5c48c60b6..96e545893 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -24,9 +24,15 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
from __future__ import absolute_import
+import base64
import hashlib
import os
+import re
import string
+import struct
+
+from pyasn1.codec.der import encoder as der_encoder
+from pyasn1.type import univ
from nova import context
from nova import db
@@ -181,23 +187,75 @@ def decrypt_text(project_id, text):
raise exception.DecryptionFailure(reason=exc.stderr)
+_RSA_OID = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+
+
+def _to_sequence(*vals):
+ seq = univ.Sequence()
+ for i in range(len(vals)):
+ seq.setComponentByPosition(i, vals[i])
+ return seq
+
+
+def convert_from_sshrsa_to_pkcs8(pubkey):
+ """Convert a ssh public key to openssl format
+ Equivalent to the ssh-keygen's -m option
+ """
+ # get the second field from the public key file.
+ try:
+ keydata = base64.b64decode(pubkey.split(None)[1])
+ except IndexError:
+ msg = _("Unable to find the key")
+ raise exception.EncryptionFailure(reason=msg)
+
+ # decode the parts of the key
+ parts = []
+ while keydata:
+ dlen = struct.unpack('>I', keydata[:4])[0]
+ data = keydata[4:dlen + 4]
+ keydata = keydata[4 + dlen:]
+ parts.append(data)
+
+ # Use asn to build the openssl key structure
+ #
+ # SEQUENCE(2 elem)
+ # +- SEQUENCE(2 elem)
+ # | +- OBJECT IDENTIFIER (1.2.840.113549.1.1.1)
+ # | +- NULL
+ # +- BIT STRING(1 elem)
+ # +- SEQUENCE(2 elem)
+ # +- INTEGER(2048 bit)
+ # +- INTEGER 65537
+
+ # Build the sequence for the bit string
+    n_val = int(
+        ''.join(['%02X' % struct.unpack('B', x)[0] for x in parts[2]]), 16)
+    e_val = int(
+        ''.join(['%02X' % struct.unpack('B', x)[0] for x in parts[1]]), 16)
+ pkinfo = _to_sequence(univ.Integer(n_val), univ.Integer(e_val))
+
+ # Convert the sequence into a bit string
+ pklong = long(der_encoder.encode(pkinfo).encode('hex'), 16)
+ pkbitstring = univ.BitString("'00%s'B" % bin(pklong)[2:])
+
+ # Build the key data structure
+ oid = _to_sequence(_RSA_OID, univ.Null())
+ pkcs1_seq = _to_sequence(oid, pkbitstring)
+ pkcs8 = base64.encodestring(der_encoder.encode(pkcs1_seq))
+
+ # Remove the embedded new line and format the key, each line
+ # should be 64 characters long
+ return ('-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----\n' %
+            re.sub("(.{64})", "\\1\n", pkcs8.replace('\n', '')))
+
+
def ssh_encrypt_text(ssh_public_key, text):
"""Encrypt text with an ssh public key.
-
- Requires recent ssh-keygen binary in addition to openssl binary.
"""
with utils.tempdir() as tmpdir:
- sshkey = os.path.abspath(os.path.join(tmpdir, 'ssh.key'))
- with open(sshkey, 'w') as f:
- f.write(ssh_public_key)
sslkey = os.path.abspath(os.path.join(tmpdir, 'ssl.key'))
try:
- # NOTE(vish): -P is to skip prompt on bad keys
- out, _err = utils.execute('ssh-keygen',
- '-P', '',
- '-e',
- '-f', sshkey,
- '-m', 'PKCS8')
+ out = convert_from_sshrsa_to_pkcs8(ssh_public_key)
with open(sslkey, 'w') as f:
f.write(out)
enc, _err = utils.execute('openssl',
diff --git a/nova/db/api.py b/nova/db/api.py
index ffd153a46..6ec0b3a95 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1630,9 +1630,9 @@ def actions_get(context, uuid):
return IMPL.actions_get(context, uuid)
-def action_get_by_id(context, uuid, action_id):
- """Get the action by id and given instance."""
- return IMPL.action_get_by_id(context, uuid, action_id)
+def action_get_by_request_id(context, uuid, request_id):
+ """Get the action by request_id and given instance."""
+ return IMPL.action_get_by_request_id(context, uuid, request_id)
def action_event_start(context, values):
@@ -1646,6 +1646,7 @@ def action_event_finish(context, values):
def action_events_get(context, action_id):
+ """Get the events by action id."""
return IMPL.action_events_get(context, action_id)
@@ -1714,3 +1715,25 @@ def task_log_get(context, task_name, period_beginning,
period_ending, host, state=None):
return IMPL.task_log_get(context, task_name, period_beginning,
period_ending, host, state)
+
+
+####################
+
+
+def archive_deleted_rows(context, max_rows=None):
+ """Move up to max_rows rows from production tables to corresponding shadow
+ tables.
+
+ :returns: number of rows archived.
+ """
+ return IMPL.archive_deleted_rows(context, max_rows=max_rows)
+
+
+def archive_deleted_rows_for_table(context, tablename, max_rows=None):
+ """Move up to max_rows rows from tablename to corresponding shadow
+ table.
+
+ :returns: number of rows archived.
+ """
+ return IMPL.archive_deleted_rows_for_table(context, tablename,
+ max_rows=max_rows)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index d0a58e44f..eb9181fce 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -26,13 +26,20 @@ import functools
import uuid
from sqlalchemy import and_
+from sqlalchemy import Boolean
from sqlalchemy.exc import IntegrityError
+from sqlalchemy.exc import NoSuchTableError
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
+from sqlalchemy.schema import Table
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
+from sqlalchemy.sql.expression import select
from sqlalchemy.sql import func
+from sqlalchemy import String
from nova import block_device
from nova.compute import task_states
@@ -63,6 +70,7 @@ CONF.import_opt('sql_connection',
LOG = logging.getLogger(__name__)
+get_engine = db_session.get_engine
get_session = db_session.get_session
@@ -1536,8 +1544,8 @@ def _build_instance_get(context, session=None):
@require_context
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
- columns_to_join = ['info_cache', 'security_groups',
- 'metadata', 'instance_type']
+ columns_to_join = ['info_cache', 'security_groups', 'metadata',
+ 'instance_type', 'system_metadata']
query = model_query(context, models.Instance)
for column in columns_to_join:
query = query.options(joinedload(column))
@@ -1656,7 +1664,7 @@ def regex_filter(query, model, filters):
return query
-@require_admin_context
+@require_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Return instances and joins that were active during window."""
@@ -1764,6 +1772,7 @@ def instance_get_all_hung_in_rebooting(context, reboot_window):
datetime.timedelta(seconds=reboot_window))
return model_query(context, models.Instance).\
+ options(joinedload('system_metadata')).\
filter(models.Instance.updated_at <= reboot_window).\
filter_by(task_state=task_states.REBOOTING).all()
@@ -4586,13 +4595,9 @@ def actions_get(context, instance_uuid):
return actions
-def action_get_by_id(context, instance_uuid, action_id):
- """Get the action by id and given instance."""
- action = model_query(context, models.InstanceAction).\
- filter_by(instance_uuid=instance_uuid).\
- filter_by(id=action_id).\
- first()
-
+def action_get_by_request_id(context, instance_uuid, request_id):
+ """Get the action by request_id and given instance."""
+ action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
@@ -4789,3 +4794,94 @@ def task_log_end_task(context, task_name, period_beginning, period_ending,
if rows == 0:
#It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
+
+
+def _get_default_deleted_value(table):
+ # TODO(dripton): It would be better to introspect the actual default value
+ # from the column, but I don't see a way to do that in the low-level APIs
+ # of SQLAlchemy 0.7. 0.8 has better introspection APIs, which we should
+ # use when Nova is ready to require 0.8.
+ deleted_column_type = table.c.deleted.type
+ if isinstance(deleted_column_type, Integer):
+ return 0
+ elif isinstance(deleted_column_type, Boolean):
+ return False
+ elif isinstance(deleted_column_type, String):
+ return ""
+ else:
+ return None
+
+
+@require_admin_context
+def archive_deleted_rows_for_table(context, tablename, max_rows=None):
+ """Move up to max_rows rows from one tables to the corresponding
+ shadow table.
+
+ :returns: number of rows archived
+ """
+ # The context argument is only used for the decorator.
+ if max_rows is None:
+ max_rows = 5000
+ engine = get_engine()
+ conn = engine.connect()
+ metadata = MetaData()
+ metadata.bind = engine
+ table = Table(tablename, metadata, autoload=True)
+ default_deleted_value = _get_default_deleted_value(table)
+ shadow_tablename = "shadow_" + tablename
+ rows_archived = 0
+ try:
+ shadow_table = Table(shadow_tablename, metadata, autoload=True)
+ except NoSuchTableError:
+ # No corresponding shadow table; skip it.
+ return rows_archived
+ # Group the insert and delete in a transaction.
+ with conn.begin():
+ # TODO(dripton): It would be more efficient to insert(select) and then
+ # delete(same select) without ever returning the selected rows back to
+ # Python. sqlalchemy does not support that directly, but we have
+ # nova.db.sqlalchemy.utils.InsertFromSelect for the insert side. We
+ # need a corresponding function for the delete side.
+ try:
+ column = table.c.id
+ column_name = "id"
+ except AttributeError:
+ # We have one table (dns_domains) where the key is called
+ # "domain" rather than "id"
+ column = table.c.domain
+ column_name = "domain"
+ query = select([table],
+ table.c.deleted != default_deleted_value).\
+ order_by(column).limit(max_rows)
+ rows = conn.execute(query).fetchall()
+ if rows:
+ insert_statement = shadow_table.insert()
+ conn.execute(insert_statement, rows)
+ keys = [getattr(row, column_name) for row in rows]
+ delete_statement = table.delete(column.in_(keys))
+ result = conn.execute(delete_statement)
+ rows_archived = result.rowcount
+ return rows_archived
+
+
+@require_admin_context
+def archive_deleted_rows(context, max_rows=None):
+ """Move up to max_rows rows from production tables to the corresponding
+ shadow tables.
+
+ :returns: Number of rows archived.
+ """
+ # The context argument is only used for the decorator.
+ if max_rows is None:
+ max_rows = 5000
+ tablenames = []
+ for model_class in models.__dict__.itervalues():
+ if hasattr(model_class, "__tablename__"):
+ tablenames.append(model_class.__tablename__)
+ rows_archived = 0
+ for tablename in tablenames:
+ rows_archived += archive_deleted_rows_for_table(context, tablename,
+ max_rows=max_rows - rows_archived)
+ if rows_archived >= max_rows:
+ break
+ return rows_archived
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/154_add_shadow_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/154_add_shadow_tables.py
new file mode 100644
index 000000000..7c9f69c2b
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/154_add_shadow_tables.py
@@ -0,0 +1,77 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Red Hat, Inc.
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import BigInteger, Column, MetaData, Table
+from sqlalchemy.types import NullType
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData(migrate_engine)
+ meta.reflect(migrate_engine)
+ table_names = meta.tables.keys()
+
+ meta.bind = migrate_engine
+
+ for table_name in table_names:
+ if table_name.startswith('shadow'):
+ continue
+ table = Table(table_name, meta, autoload=True)
+
+ columns = []
+ for column in table.columns:
+ column_copy = None
+ # NOTE(boris-42): BigInteger is not supported by sqlite, so
+ # after copy it will have NullType, other
+ # types that are used in Nova are supported by
+ # sqlite.
+ if isinstance(column.type, NullType):
+ column_copy = Column(column.name, BigInteger(), default=0)
+ else:
+ column_copy = column.copy()
+ columns.append(column_copy)
+
+ shadow_table_name = 'shadow_' + table_name
+ shadow_table = Table(shadow_table_name, meta, *columns,
+ mysql_engine='InnoDB')
+ try:
+ shadow_table.create()
+ except Exception:
+ LOG.info(repr(shadow_table))
+ LOG.exception(_('Exception while creating table.'))
+ raise
+
+
+def downgrade(migrate_engine):
+ meta = MetaData(migrate_engine)
+ meta.reflect(migrate_engine)
+ table_names = meta.tables.keys()
+
+ meta.bind = migrate_engine
+
+ for table_name in table_names:
+ if table_name.startswith('shadow'):
+ continue
+ shadow_table_name = 'shadow_' + table_name
+ shadow_table = Table(shadow_table_name, meta, autoload=True)
+ try:
+ shadow_table.drop()
+ except Exception:
+ LOG.error(_("table '%s' not dropped") % shadow_table_name)
diff --git a/nova/db/sqlalchemy/types.py b/nova/db/sqlalchemy/types.py
index 275e61a4c..ef861b832 100644
--- a/nova/db/sqlalchemy/types.py
+++ b/nova/db/sqlalchemy/types.py
@@ -18,9 +18,21 @@
"""Custom SQLAlchemy types."""
from sqlalchemy.dialects import postgresql
-from sqlalchemy import String
+from sqlalchemy import types
+from nova import utils
-def IPAddress():
+
+class IPAddress(types.TypeDecorator):
"""An SQLAlchemy type representing an IP-address."""
- return String(39).with_variant(postgresql.INET(), 'postgresql')
+ impl = types.String(39).with_variant(postgresql.INET(), 'postgresql')
+
+ def process_bind_param(self, value, dialect):
+ """Process/Formats the value before insert it into the db."""
+ if dialect.name == 'postgresql':
+ return value
+ # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened
+ # form, not validate it.
+ elif utils.is_valid_ipv6(value):
+ return utils.get_shortened_ipv6(value)
+ return value
diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py
new file mode 100644
index 000000000..2faa5021f
--- /dev/null
+++ b/nova/db/sqlalchemy/utils.py
@@ -0,0 +1,117 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy.engine import reflection
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy import MetaData, Table, Column, Index
+from sqlalchemy.sql.expression import UpdateBase
+from sqlalchemy.types import NullType
+
+from nova import exception
+
+
+class InsertFromSelect(UpdateBase):
+ def __init__(self, table, select):
+ self.table = table
+ self.select = select
+
+
+@compiles(InsertFromSelect)
+def visit_insert_from_select(element, compiler, **kw):
+ return "INSERT INTO %s %s" % (
+ compiler.process(element.table, asfrom=True),
+ compiler.process(element.select))
+
+
+def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
+ **col_name_col_instance):
+ insp = reflection.Inspector.from_engine(migrate_engine)
+ meta = MetaData(bind=migrate_engine)
+
+ table = Table(table_name, meta, autoload=True)
+ columns = []
+ for column in table.columns:
+ if isinstance(column.type, NullType):
+            new_column = col_name_col_instance.get(column.name)
+            if new_column is None:
+                msg = _("Please specify column %s in col_name_col_instance "
+                        "param. It is required because column has unsupported "
+                        "type by sqlite).")
+                raise exception.NovaException(msg % column.name)
+
+ if not isinstance(new_column, Column):
+ msg = _("col_name_col_instance param has wrong type of "
+ "column instance for column %s It should be instance "
+ "of sqlalchemy.Column.")
+ raise exception.NovaException(msg % column.name)
+ columns.append(new_column)
+ else:
+ columns.append(column.copy())
+
+ constraints = [constraint for constraint in table.constraints
+ if not constraint.name == uc_name]
+
+ new_table = Table(table_name + "__tmp__", meta, *(columns + constraints))
+ new_table.create()
+
+ indexes = []
+ for index in insp.get_indexes(table_name):
+ column_names = [new_table.c[c] for c in index['column_names']]
+ indexes.append(Index(index["name"],
+ *column_names,
+ unique=index["unique"]))
+
+ ins = InsertFromSelect(new_table, table.select())
+ migrate_engine.execute(ins)
+ table.drop()
+
+ [index.create(migrate_engine) for index in indexes]
+ new_table.rename(table_name)
+
+
+def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
+ **col_name_col_instance):
+ """
+ This method drops UC from table and works for mysql, postgresql and sqlite.
+ In mysql and postgresql we are able to use "alter table" constuction. In
+ sqlite is only one way to drop UC:
+ 1) Create new table with same columns, indexes and constraints
+ (except one that we want to drop).
+ 2) Copy data from old table to new.
+ 3) Drop old table.
+ 4) Rename new table to the name of old table.
+
+ :param migrate_engine: sqlalchemy engine
+    :param table_name: name of table that contains unique constraint.
+    :param uc_name: name of unique constraint that will be dropped.
+    :param columns: columns that are in unique constraint.
+    :param col_name_col_instance: contains pair column_name=column_instance.
+ column_instance is instance of Column. These params
+ are required only for columns that have unsupported
+ types by sqlite. For example BigInteger.
+ """
+ if migrate_engine.name in ["mysql", "postgresql"]:
+ meta = MetaData()
+ meta.bind = migrate_engine
+ t = Table(table_name, meta, autoload=True)
+        uc = UniqueConstraint(*columns, table=t, name=uc_name)
+ uc.drop()
+ else:
+ _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
+ **col_name_col_instance)
diff --git a/nova/exception.py b/nova/exception.py
index 3b20b7e78..9e9e5182b 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -344,7 +344,7 @@ class ComputeResourcesUnavailable(ServiceUnavailable):
class ComputeServiceUnavailable(ServiceUnavailable):
- message = _("Compute service is unavailable at this time.")
+ message = _("Compute service of %(host)s is unavailable at this time.")
class UnableToMigrateToSelf(Invalid):
@@ -1110,3 +1110,23 @@ class CryptoCRLFileNotFound(FileNotFound):
class InstanceRecreateNotSupported(Invalid):
message = _('Instance recreate is not implemented by this virt driver.')
+
+
+class ServiceGroupUnavailable(NovaException):
+    message = _("The service from servicegroup driver %(driver)s is "
+                "temporarily unavailable.")
+
+
+class DBNotAllowed(NovaException):
+ message = _('%(binary)s attempted direct database access which is '
+ 'not allowed by policy')
+
+
+class UnsupportedVirtType(Invalid):
+ message = _("Virtualization type '%(virt)s' is not supported by "
+ "this compute driver")
+
+
+class UnsupportedHardware(Invalid):
+ message = _("Requested hardware '%(model)s' is not supported by "
+ "the '%(virt)s' virt driver")
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 567ba7de8..6cfd4c904 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 2013.1\n"
+"Project-Id-Version: nova jenkins.nova.propose.translation.update.157\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2013-02-11 00:02+0000\n"
+"POT-Creation-Date: 2013-02-13 00:02+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -17,7 +17,7 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 0.9.6\n"
-#: nova/block_device.py:167 nova/virt/hyperv/basevolumeutils.py:96
+#: nova/block_device.py:167 nova/virt/hyperv/basevolumeutils.py:97
#, python-format
msgid "block_device_list %s"
msgstr ""
@@ -27,50 +27,54 @@ msgstr ""
msgid "Arguments dropped when creating context: %s"
msgstr ""
-#: nova/context.py:102
+#: nova/context.py:101
#, python-format
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr ""
-#: nova/crypto.py:47
+#: nova/crypto.py:53
msgid "Filename of root CA"
msgstr ""
-#: nova/crypto.py:50
+#: nova/crypto.py:56
msgid "Filename of private key"
msgstr ""
-#: nova/crypto.py:53
+#: nova/crypto.py:59
msgid "Filename of root Certificate Revocation List"
msgstr ""
-#: nova/crypto.py:56
+#: nova/crypto.py:62
msgid "Where we keep our keys"
msgstr ""
-#: nova/crypto.py:59
+#: nova/crypto.py:65
msgid "Where we keep our root CA"
msgstr ""
-#: nova/crypto.py:62
+#: nova/crypto.py:68
msgid "Should we use a CA for each project?"
msgstr ""
-#: nova/crypto.py:66
+#: nova/crypto.py:72
#, python-format
msgid "Subject for certificate for users, %s for project, user, timestamp"
msgstr ""
-#: nova/crypto.py:71
+#: nova/crypto.py:77
#, python-format
msgid "Subject for certificate for projects, %s for project, timestamp"
msgstr ""
-#: nova/crypto.py:332
+#: nova/crypto.py:208
+msgid "Unable to find the key"
+msgstr ""
+
+#: nova/crypto.py:390
msgid "Failed to write inbound.csr"
msgstr ""
-#: nova/crypto.py:335
+#: nova/crypto.py:393
#, python-format
msgid "Flags path: %s"
msgstr ""
@@ -171,7 +175,7 @@ msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
#: nova/exception.py:234 nova/api/ec2/cloud.py:461
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2628
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2634
msgid "Keypair data is invalid"
msgstr ""
@@ -1192,6 +1196,16 @@ msgstr ""
msgid "Instance recreate is not implemented by this virt driver."
msgstr ""
+#: nova/exception.py:1116
+#, python-format
+msgid "The service from servicegroup driver %(driver) is temporarily unavailable."
+msgstr ""
+
+#: nova/exception.py:1121
+#, python-format
+msgid "%(binary)s attempted direct database access which is not allowed by policy"
+msgstr ""
+
#: nova/hooks.py:62
#, python-format
msgid "Running %(name)s pre-hook: %(obj)s"
@@ -1649,8 +1663,8 @@ msgstr ""
msgid "Environment: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:133
-#: nova/api/metadata/handler.py:180
+#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:136
+#: nova/api/metadata/handler.py:183
msgid "An unknown error has occurred. Please try your request again."
msgstr ""
@@ -1899,27 +1913,27 @@ msgstr ""
msgid "Timestamp is invalid."
msgstr ""
-#: nova/api/metadata/handler.py:105
+#: nova/api/metadata/handler.py:108
msgid ""
"X-Instance-ID present in request headers. The "
"'service_quantum_metadata_proxy' option must be enabled to process this "
"header."
msgstr ""
-#: nova/api/metadata/handler.py:131 nova/api/metadata/handler.py:138
+#: nova/api/metadata/handler.py:134 nova/api/metadata/handler.py:141
#, python-format
msgid "Failed to get metadata for ip: %s"
msgstr ""
-#: nova/api/metadata/handler.py:150
+#: nova/api/metadata/handler.py:153
msgid "X-Instance-ID header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:152
+#: nova/api/metadata/handler.py:155
msgid "Multiple X-Instance-ID headers found within request."
msgstr ""
-#: nova/api/metadata/handler.py:166
+#: nova/api/metadata/handler.py:169
#, python-format
msgid ""
"X-Instance-ID-Signature: %(signature)s does not match the expected value:"
@@ -1927,11 +1941,11 @@ msgid ""
"%(remote_address)s"
msgstr ""
-#: nova/api/metadata/handler.py:171
+#: nova/api/metadata/handler.py:174
msgid "Invalid proxy request signature."
msgstr ""
-#: nova/api/metadata/handler.py:178 nova/api/metadata/handler.py:185
+#: nova/api/metadata/handler.py:181 nova/api/metadata/handler.py:188
#, python-format
msgid "Failed to get metadata for instance id: %s"
msgstr ""
@@ -2830,7 +2844,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/floating_ips.py:215
#: nova/api/openstack/compute/contrib/floating_ips.py:271
-#: nova/api/openstack/compute/contrib/security_groups.py:414
+#: nova/api/openstack/compute/contrib/security_groups.py:421
msgid "Missing parameter dict"
msgstr ""
@@ -3063,24 +3077,29 @@ msgstr ""
msgid "Malformed scheduler_hints attribute"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:221
+#: nova/api/openstack/compute/contrib/security_groups.py:222
msgid "Security group id should be integer"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:330
+#: nova/api/openstack/compute/contrib/security_groups.py:331
msgid "Not enough parameters to build a valid rule."
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:336
+#: nova/api/openstack/compute/contrib/security_groups.py:339
+#, python-format
+msgid "Bad prefix for network in cidr %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/security_groups.py:343
#, python-format
msgid "This rule already exists in group %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:417
+#: nova/api/openstack/compute/contrib/security_groups.py:424
msgid "Security group not specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:421
+#: nova/api/openstack/compute/contrib/security_groups.py:428
msgid "Security group name cannot be empty"
msgstr ""
@@ -3088,7 +3107,7 @@ msgstr ""
msgid "start instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_start_stop.py:55
+#: nova/api/openstack/compute/contrib/server_start_stop.py:58
msgid "stop instance"
msgstr ""
@@ -3281,220 +3300,220 @@ msgstr ""
msgid "Launching VPN for %s"
msgstr ""
-#: nova/compute/api.py:264
+#: nova/compute/api.py:273
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:271
+#: nova/compute/api.py:280
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:280
+#: nova/compute/api.py:289
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:300
+#: nova/compute/api.py:309
#, python-format
msgid ""
"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
"properties"
msgstr ""
-#: nova/compute/api.py:310
+#: nova/compute/api.py:319
msgid "Metadata property key blank"
msgstr ""
-#: nova/compute/api.py:314
+#: nova/compute/api.py:323
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:318
+#: nova/compute/api.py:327
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:443
+#: nova/compute/api.py:452
msgid "Cannot attach one or more volumes to multiple instances"
msgstr ""
-#: nova/compute/api.py:556
+#: nova/compute/api.py:565
#, python-format
msgid "Going to run %s instances..."
msgstr ""
-#: nova/compute/api.py:667
+#: nova/compute/api.py:676
#, python-format
msgid "bdm %s"
msgstr ""
-#: nova/compute/api.py:694
+#: nova/compute/api.py:703
#, python-format
msgid "block_device_mapping %s"
msgstr ""
-#: nova/compute/api.py:956
+#: nova/compute/api.py:965
msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:1090
+#: nova/compute/api.py:1099
#, python-format
msgid "instance type %(old_inst_type_id)d not found"
msgstr ""
-#: nova/compute/api.py:1096
+#: nova/compute/api.py:1105
msgid "going to delete a resizing instance"
msgstr ""
-#: nova/compute/api.py:1106
+#: nova/compute/api.py:1115
#, python-format
msgid "instance's host %s is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:1150
+#: nova/compute/api.py:1159
msgid "Going to try to soft delete instance"
msgstr ""
-#: nova/compute/api.py:1172
+#: nova/compute/api.py:1181
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:1221
+#: nova/compute/api.py:1231
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1237
+#: nova/compute/api.py:1248
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1307
+#: nova/compute/api.py:1318
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1551
+#: nova/compute/api.py:1562
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1907
+#: nova/compute/api.py:1918
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1916
+#: nova/compute/api.py:1927
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:1958
+#: nova/compute/api.py:1969
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-#: nova/compute/api.py:2194
+#: nova/compute/api.py:2200
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:2202
+#: nova/compute/api.py:2208
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:2270
+#: nova/compute/api.py:2276
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2361
+#: nova/compute/api.py:2367
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2380
+#: nova/compute/api.py:2386
msgid "vm evacuation scheduled"
msgstr ""
-#: nova/compute/api.py:2384
+#: nova/compute/api.py:2390
#, python-format
msgid ""
"Instance compute service state on %(host)s expected to be down, but it "
"was up."
msgstr ""
-#: nova/compute/api.py:2601
+#: nova/compute/api.py:2607
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2605
+#: nova/compute/api.py:2611
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2706
+#: nova/compute/api.py:2712
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2709
+#: nova/compute/api.py:2715
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2717
+#: nova/compute/api.py:2723
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2723
+#: nova/compute/api.py:2729
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2743
+#: nova/compute/api.py:2749
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2746
+#: nova/compute/api.py:2752
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2753
+#: nova/compute/api.py:2759
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2818
+#: nova/compute/api.py:2824
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2826
+#: nova/compute/api.py:2832
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2829
+#: nova/compute/api.py:2835
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:3087
+#: nova/compute/api.py:3093
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:3096
+#: nova/compute/api.py:3102
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:3099
+#: nova/compute/api.py:3105
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:3110
+#: nova/compute/api.py:3116
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
@@ -3746,8 +3765,8 @@ msgstr ""
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:1192 nova/compute/manager.py:2230
-#: nova/compute/manager.py:3612
+#: nova/compute/manager.py:1192 nova/compute/manager.py:2232
+#: nova/compute/manager.py:3618
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
@@ -3881,261 +3900,261 @@ msgstr ""
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:2227
+#: nova/compute/manager.py:2229
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:2282
+#: nova/compute/manager.py:2284
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:2300
+#: nova/compute/manager.py:2302
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2338
+#: nova/compute/manager.py:2340
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2369
+#: nova/compute/manager.py:2371
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2389
+#: nova/compute/manager.py:2391
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2394
+#: nova/compute/manager.py:2396
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2397
+#: nova/compute/manager.py:2399
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2414
+#: nova/compute/manager.py:2416
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2439
+#: nova/compute/manager.py:2441
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2467
+#: nova/compute/manager.py:2469
msgid "Getting spice console"
msgstr ""
-#: nova/compute/manager.py:2497
+#: nova/compute/manager.py:2499
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2548
+#: nova/compute/manager.py:2550
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2557
+#: nova/compute/manager.py:2559
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2572
+#: nova/compute/manager.py:2574
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2602
+#: nova/compute/manager.py:2604
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2612
+#: nova/compute/manager.py:2614
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2619
+#: nova/compute/manager.py:2621
#, python-format
msgid "Failed to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2643
+#: nova/compute/manager.py:2645
msgid "Updating volume usage cache with totals"
msgstr ""
-#: nova/compute/manager.py:2680
+#: nova/compute/manager.py:2682
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2753
+#: nova/compute/manager.py:2755
msgid "Instance has no volume."
msgstr ""
-#: nova/compute/manager.py:2814
+#: nova/compute/manager.py:2816
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2842
+#: nova/compute/manager.py:2844
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2895
+#: nova/compute/manager.py:2899
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2897
+#: nova/compute/manager.py:2901
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2911
+#: nova/compute/manager.py:2915
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:3049
+#: nova/compute/manager.py:3055
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:3094
+#: nova/compute/manager.py:3100
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:3100
+#: nova/compute/manager.py:3106
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:3109
+#: nova/compute/manager.py:3115
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:3116
+#: nova/compute/manager.py:3122
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:3120
+#: nova/compute/manager.py:3126
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:3127
+#: nova/compute/manager.py:3133
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr ""
-#: nova/compute/manager.py:3135
+#: nova/compute/manager.py:3141
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:3152
+#: nova/compute/manager.py:3158
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:3171
+#: nova/compute/manager.py:3177
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:3195
+#: nova/compute/manager.py:3201
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:3313
+#: nova/compute/manager.py:3319
msgid "Updating volume usage cache"
msgstr ""
-#: nova/compute/manager.py:3331
+#: nova/compute/manager.py:3337
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:3360
+#: nova/compute/manager.py:3366
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:3366 nova/compute/manager.py:3404
+#: nova/compute/manager.py:3372 nova/compute/manager.py:3410
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:3391
+#: nova/compute/manager.py:3397
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:3427
+#: nova/compute/manager.py:3433
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3439 nova/compute/manager.py:3448
-#: nova/compute/manager.py:3478
+#: nova/compute/manager.py:3445 nova/compute/manager.py:3454
+#: nova/compute/manager.py:3484
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:3443
+#: nova/compute/manager.py:3449
msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3459
+#: nova/compute/manager.py:3465
msgid "Instance is paused unexpectedly. Ignore."
msgstr ""
-#: nova/compute/manager.py:3465
+#: nova/compute/manager.py:3471
msgid "Instance is unexpectedly not found. Ignore."
msgstr ""
-#: nova/compute/manager.py:3471
+#: nova/compute/manager.py:3477
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3487
+#: nova/compute/manager.py:3493
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3495
+#: nova/compute/manager.py:3501
msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3510
+#: nova/compute/manager.py:3516
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3565
+#: nova/compute/manager.py:3571
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3572
+#: nova/compute/manager.py:3578
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3579
+#: nova/compute/manager.py:3585
#, python-format
msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
@@ -4254,18 +4273,18 @@ msgstr ""
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/conductor/api.py:338
+#: nova/conductor/api.py:363
msgid ""
"Timed out waiting for nova-conductor. Is it running? Or did this service "
"start before nova-conductor?"
msgstr ""
-#: nova/conductor/manager.py:67
+#: nova/conductor/manager.py:80
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
-#: nova/conductor/manager.py:227
+#: nova/conductor/manager.py:245
msgid "Invalid block_device_mapping_destroy invocation"
msgstr ""
@@ -4390,6 +4409,20 @@ msgstr ""
msgid "Upgrade DB using Essex release first."
msgstr ""
+#: nova/db/sqlalchemy/utils.py:53
+#, python-format
+msgid ""
+"Please specify column %s in col_name_col_instance param. It is required "
+"because column has unsupported type by sqlite)."
+msgstr ""
+
+#: nova/db/sqlalchemy/utils.py:59
+#, python-format
+msgid ""
+"col_name_col_instance param has wrong type of column instance for column "
+"%s It should be instance of sqlalchemy.Column."
+msgstr ""
+
#: nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py:62
msgid "Exception while seeding instance_types table"
msgstr ""
@@ -4894,21 +4927,33 @@ msgstr ""
msgid "get_instance_nw_info() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:272 nova/network/quantumv2/api.py:299
+#: nova/network/quantumv2/api.py:271
+#, python-format
+msgid ""
+"Unable to update port %(portid)s on subnet %(subnet_id)s with failure: "
+"%(exception)s"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:300
#, python-format
msgid "Unable to update port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:309
+#: nova/network/quantumv2/api.py:310
#, python-format
msgid "validate_networks() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:561
+#: nova/network/quantumv2/api.py:562
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
+#: nova/openstack/common/jsonutils.py:88
+#, python-format
+msgid "Max serialization depth exceeded on object: %d %s"
+msgstr ""
+
#: nova/openstack/common/lockutils.py:98
#, python-format
msgid "Could not release the acquired lock `%s`"
@@ -5399,15 +5444,15 @@ msgstr ""
msgid "Setting instance to %(state)s state."
msgstr ""
-#: nova/scheduler/driver.py:139
+#: nova/scheduler/driver.py:152
msgid "Driver must implement schedule_prep_resize"
msgstr ""
-#: nova/scheduler/driver.py:147
+#: nova/scheduler/driver.py:160
msgid "Driver must implement schedule_run_instance"
msgstr ""
-#: nova/scheduler/driver.py:271
+#: nova/scheduler/driver.py:284
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
@@ -5419,33 +5464,33 @@ msgstr ""
msgid "Attempting to build %(num_instances)d instance(s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:192
+#: nova/scheduler/filter_scheduler.py:201
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:209
+#: nova/scheduler/filter_scheduler.py:218
#, python-format
msgid "Error from last host: %(last_host)s (node %(last_node)s): %(exc)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:238
+#: nova/scheduler/filter_scheduler.py:247
#, python-format
msgid ""
"Exceeded max scheduling attempts %(max_attempts)d for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:292
+#: nova/scheduler/filter_scheduler.py:313
#, python-format
msgid "Filtered %(hosts)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:297
+#: nova/scheduler/filter_scheduler.py:318
#, python-format
msgid "Choosing host %(best_host)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:327
+#: nova/scheduler/filter_scheduler.py:351
#, python-format
msgid "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory"
msgstr ""
@@ -5504,6 +5549,11 @@ msgid ""
"'nova.scheduler.filters.standard_filters'"
msgstr ""
+#: nova/scheduler/filters/affinity_filter.py:95
+#, python-format
+msgid "Group affinity: %(host)s in %(configured)s"
+msgstr ""
+
#: nova/scheduler/filters/aggregate_instance_extra_specs.py:49
#: nova/scheduler/filters/aggregate_instance_extra_specs.py:56
#: nova/scheduler/filters/compute_capabilities_filter.py:57
@@ -5588,41 +5638,41 @@ msgstr ""
msgid "least_cost has been deprecated in favor of the RAM Weigher."
msgstr ""
-#: nova/servicegroup/api.py:59
+#: nova/servicegroup/api.py:60
#, python-format
msgid "ServiceGroup driver defined as an instance of %s"
msgstr ""
-#: nova/servicegroup/api.py:65
+#: nova/servicegroup/api.py:66
#, python-format
msgid "unknown ServiceGroup driver name: %s"
msgstr ""
-#: nova/servicegroup/api.py:82
+#: nova/servicegroup/api.py:83
#, python-format
msgid ""
"Join new ServiceGroup member %(member_id)s to the %(group_id)s group, "
"service = %(service)s"
msgstr ""
-#: nova/servicegroup/api.py:89
+#: nova/servicegroup/api.py:90
#, python-format
msgid "Check if the given member [%s] is part of the ServiceGroup, is up"
msgstr ""
-#: nova/servicegroup/api.py:98
+#: nova/servicegroup/api.py:99
#, python-format
msgid ""
"Explicitly remove the given member %(member_id)s from the%(group_id)s "
"group monitoring"
msgstr ""
-#: nova/servicegroup/api.py:105
+#: nova/servicegroup/api.py:106
#, python-format
msgid "Returns ALL members of the [%s] ServiceGroup"
msgstr ""
-#: nova/servicegroup/api.py:113
+#: nova/servicegroup/api.py:114
#, python-format
msgid "Returns one member of the [%s] group"
msgstr ""
@@ -5651,6 +5701,30 @@ msgstr ""
msgid "model server went away"
msgstr ""
+#: nova/servicegroup/drivers/zk.py:79
+#, python-format
+msgid ""
+"ZooKeeperDriver: join new member %(id)s to the %(gr)s group, "
+"service=%(sr)s"
+msgstr ""
+
+#: nova/servicegroup/drivers/zk.py:89
+msgid ""
+"Unable to join. It is possible that either another node exists with the "
+"same name, or this node just restarted. We will try again in a short "
+"while to make sure."
+msgstr ""
+
+#: nova/servicegroup/drivers/zk.py:100
+#, python-format
+msgid "ZooKeeperDriver.leave: %(member)s from group %(group)s"
+msgstr ""
+
+#: nova/servicegroup/drivers/zk.py:108
+#, python-format
+msgid "ZooKeeperDriver.leave: %(id)s has not joined to the %(gr)s group"
+msgstr ""
+
#: nova/tests/fake_ldap.py:34
msgid "Attempted to instantiate singleton"
msgstr ""
@@ -5703,7 +5777,7 @@ msgstr ""
msgid "Expected a function in 'auth[1]' parameter"
msgstr ""
-#: nova/tests/test_hypervapi.py:406
+#: nova/tests/test_hypervapi.py:413
msgid "fake vswitch not found"
msgstr ""
@@ -5841,7 +5915,7 @@ msgstr ""
#: nova/tests/compute/test_compute.py:734
#: nova/tests/compute/test_compute.py:785
#: nova/tests/compute/test_compute.py:812
-#: nova/tests/compute/test_compute.py:2766
+#: nova/tests/compute/test_compute.py:2768
#, python-format
msgid "Running instances: %s"
msgstr ""
@@ -5858,12 +5932,12 @@ msgstr ""
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2777
+#: nova/tests/compute/test_compute.py:2779
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:3275
+#: nova/tests/compute/test_compute.py:3277
msgid "wrong host/node"
msgstr ""
@@ -6628,56 +6702,58 @@ msgstr ""
msgid "Set permissions path=%(path)s user=%(user)s group=%(group)s"
msgstr ""
-#: nova/virt/hyperv/basevolumeutils.py:73
+#: nova/virt/hyperv/basevolumeutils.py:74
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
-#: nova/virt/hyperv/driver.py:146 nova/virt/hyperv/driver.py:149
+#: nova/virt/hyperv/driver.py:147
msgid "plug_vifs called"
msgstr ""
-#: nova/virt/hyperv/driver.py:152
-msgid "ensure_filtering_rules_for_instance called"
+#: nova/virt/hyperv/driver.py:150
+msgid "unplug_vifs called"
msgstr ""
-#: nova/virt/hyperv/driver.py:156
-msgid "unfilter_instance called"
-msgstr ""
-
-#: nova/virt/hyperv/driver.py:159
-msgid "confirm_migration called"
-msgstr ""
-
-#: nova/virt/hyperv/driver.py:163
-msgid "finish_revert_migration called"
+#: nova/virt/hyperv/driver.py:153
+msgid "ensure_filtering_rules_for_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:168
-msgid "finish_migration called"
+#: nova/virt/hyperv/driver.py:157
+msgid "unfilter_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:171
+#: nova/virt/hyperv/driver.py:188
msgid "get_console_output called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:89
+#: nova/virt/hyperv/hostops.py:92
#, python-format
msgid "Windows version: %s "
msgstr ""
-#: nova/virt/hyperv/hostops.py:101
+#: nova/virt/hyperv/hostops.py:104
msgid "get_available_resource called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:131 nova/virt/libvirt/driver.py:3327
+#: nova/virt/hyperv/hostops.py:134 nova/virt/libvirt/driver.py:3324
#: nova/virt/xenapi/host.py:149
msgid "Updating host stats"
msgstr ""
-#: nova/virt/hyperv/hostops.py:155
+#: nova/virt/hyperv/hostops.py:158
msgid "get_host_stats called"
msgstr ""
+#: nova/virt/hyperv/hostops.py:173
+#, python-format
+msgid "Host IP address is: %s"
+msgstr ""
+
+#: nova/virt/hyperv/imagecache.py:42
+#, python-format
+msgid "The image is not a valid VHD: %s"
+msgstr ""
+
#: nova/virt/hyperv/livemigrationops.py:47
msgid "live_migration called"
msgstr ""
@@ -6696,11 +6772,11 @@ msgstr ""
msgid "pre_live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:80
+#: nova/virt/hyperv/livemigrationops.py:75
msgid "post_live_migration_at_destination called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:84
+#: nova/virt/hyperv/livemigrationops.py:79
#, python-format
msgid "compare_cpu called %s"
msgstr ""
@@ -6748,6 +6824,87 @@ msgstr ""
msgid "Starting live migration for VM: %s"
msgstr ""
+#: nova/virt/hyperv/migrationops.py:50
+msgid "Migration target is the source host"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:52
+#, python-format
+msgid "Migration target host: %s"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:73
+#, python-format
+msgid "Copying disk \"%(disk_file)s\" to \"%(dest_path)s\""
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:96
+msgid "Cannot cleanup migration files"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:101
+msgid "migrate_disk_and_power_off called"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:119
+msgid "confirm_migration called"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:134
+msgid "finish_revert_migration called"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:153
+#, python-format
+msgid "Copying base disk %(base_vhd_path)s to %(base_vhd_copy_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:157
+#, python-format
+msgid ""
+"Reconnecting copied base VHD %(base_vhd_copy_path)s and diff VHD "
+"%(diff_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:163
+#, python-format
+msgid "Merging base disk %(base_vhd_copy_path)s and diff disk %(diff_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:175
+#, python-format
+msgid "Getting info for disk: %s"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:180
+#, python-format
+msgid "Resizing disk \"%(vhd_path)s\" to new max size %(new_size)s"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:188
+msgid "Unexpected base VHD path"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:195
+#, python-format
+msgid ""
+"Reconnecting copied base VHD %(base_vhd_path)s and diff VHD "
+"%(diff_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:204
+msgid "finish_migration called"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:213
+#, python-format
+msgid "Cannot find boot VHD file: %s"
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:226
+msgid "Cannot resize a VHD to a smaller size"
+msgstr ""
+
#: nova/virt/hyperv/networkutils.py:46
#, python-format
msgid "vswitch \"%s\" not found"
@@ -6758,32 +6915,32 @@ msgstr ""
msgid "Failed to create vswitch port %(port_name)s on switch %(vswitch_path)s"
msgstr ""
-#: nova/virt/hyperv/pathutils.py:42 nova/virt/hyperv/pathutils.py:62
+#: nova/virt/hyperv/pathutils.py:83
#, python-format
-msgid "Creating folder %s "
+msgid "Creating directory: %s"
msgstr ""
-#: nova/virt/hyperv/pathutils.py:60
+#: nova/virt/hyperv/pathutils.py:88 nova/virt/hyperv/snapshotops.py:115
#, python-format
-msgid "Removing existing folder %s "
+msgid "Removing directory: %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:56
+#: nova/virt/hyperv/snapshotops.py:55
#, python-format
msgid "Creating snapshot for instance %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:65
+#: nova/virt/hyperv/snapshotops.py:64
#, python-format
msgid "Getting info for VHD %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:73
+#: nova/virt/hyperv/snapshotops.py:72
#, python-format
msgid "Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:83
+#: nova/virt/hyperv/snapshotops.py:82
#, python-format
msgid "Copying base disk %(src_vhd_path)s to %(dest_base_disk_path)s"
msgstr ""
@@ -6822,238 +6979,204 @@ msgstr ""
msgid "Failed to remove snapshot for VM %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:115
-#, python-format
-msgid "Removing folder %s "
-msgstr ""
-
#: nova/virt/hyperv/vif.py:75
#, python-format
msgid "Creating vswitch port for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:85
+#: nova/virt/hyperv/vmops.py:89
#, python-format
msgid "VIF driver not found for network_api_class: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:94
+#: nova/virt/hyperv/vmops.py:98
msgid "get_info called for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:135
+#: nova/virt/hyperv/vmops.py:118
#, python-format
-msgid "cache image failed: %s"
+msgid ""
+"Creating differencing VHD. Parent: %(base_vhd_path)s, Target: "
+"%(boot_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:157
-#, python-format
-msgid "Creating nic for instance: %s"
+#: nova/virt/hyperv/vmops.py:130
+msgid "Spawning new instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:172
+#: nova/virt/hyperv/vmops.py:153
msgid "Spawn instance failed"
msgstr ""
-#: nova/virt/hyperv/vmops.py:176
+#: nova/virt/hyperv/vmops.py:178
+#, python-format
+msgid "Creating nic for instance: %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:186
#, python-format
msgid "Invalid config_drive_format \"%s\""
msgstr ""
-#: nova/virt/hyperv/vmops.py:179
+#: nova/virt/hyperv/vmops.py:189
#, python-format
msgid "Using config drive for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:192 nova/virt/libvirt/driver.py:1529
+#: nova/virt/hyperv/vmops.py:202 nova/virt/libvirt/driver.py:1530
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:199 nova/virt/libvirt/driver.py:1535
+#: nova/virt/hyperv/vmops.py:210 nova/virt/libvirt/driver.py:1536
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:227
+#: nova/virt/hyperv/vmops.py:246
#, python-format
msgid "Got request to destroy instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:236
+#: nova/virt/hyperv/vmops.py:259
#, python-format
msgid "Instance not found: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:239
+#: nova/virt/hyperv/vmops.py:265
#, python-format
msgid "Failed to destroy instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:244
+#: nova/virt/hyperv/vmops.py:270
msgid "reboot instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:250
+#: nova/virt/hyperv/vmops.py:276
msgid "Pause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:256
+#: nova/virt/hyperv/vmops.py:282
msgid "Unpause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:263
+#: nova/virt/hyperv/vmops.py:289
msgid "Suspend instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:269
+#: nova/virt/hyperv/vmops.py:295
msgid "Resume instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:275
+#: nova/virt/hyperv/vmops.py:301
msgid "Power off instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:281
+#: nova/virt/hyperv/vmops.py:307
msgid "Power on instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:288
+#: nova/virt/hyperv/vmops.py:314
#, python-format
msgid "Successfully changed state of VM %(vm_name)s to: %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:292
+#: nova/virt/hyperv/vmops.py:318
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:321
-#, python-format
-msgid "Use CoW image: %s"
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:326
-#, python-format
-msgid "Creating differencing VHD. Parent: %(parent_path)s, Target: %(target)s"
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:333
-#, python-format
-msgid "Failed to create a differencing disk from %(parent_path)s to %(target)s"
-msgstr ""
-
#: nova/virt/hyperv/vmutils.py:74
#, python-format
msgid "Cannot get VM summary data for: %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:145
+#: nova/virt/hyperv/vmutils.py:151
#, python-format
msgid "Creating VM %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:154
+#: nova/virt/hyperv/vmutils.py:160
#, python-format
msgid "Setting memory for vm %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:157
+#: nova/virt/hyperv/vmutils.py:163
#, python-format
msgid "Set vCPUs for vm %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:255
+#: nova/virt/hyperv/vmutils.py:261
msgid "Controller not found"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:318
+#: nova/virt/hyperv/vmutils.py:324
#, python-format
msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:360
-#, python-format
-msgid "Deleting disk file: %(disk)s"
-msgstr ""
-
-#: nova/virt/hyperv/vmutils.py:374
+#: nova/virt/hyperv/vmutils.py:365
#, python-format
msgid "Operation failed with return value: %s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:392
+#: nova/virt/hyperv/vmutils.py:383
#, python-format
msgid ""
"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s"
" - %(err_desc)s - Error code: %(err_code)d"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:400
+#: nova/virt/hyperv/vmutils.py:391
#, python-format
msgid "WMI job failed with status %(job_state)d. Error details: %(error)s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:404
+#: nova/virt/hyperv/vmutils.py:395
#, python-format
msgid "WMI job failed with status %(job_state)d. No error description available"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:410
+#: nova/virt/hyperv/vmutils.py:401
#, python-format
msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:74 nova/virt/xenapi/vm_utils.py:496
-#, python-format
-msgid "block device info: %s"
-msgstr ""
-
-#: nova/virt/hyperv/volumeops.py:96
-#, python-format
-msgid "Attach boot from volume failed: %s"
-msgstr ""
-
-#: nova/virt/hyperv/volumeops.py:99
-#, python-format
-msgid "Unable to attach boot volume to instance %s"
-msgstr ""
-
-#: nova/virt/hyperv/volumeops.py:107
+#: nova/virt/hyperv/volumeops.py:90
#, python-format
msgid "Attach_volume: %(connection_info)s to %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:128
+#: nova/virt/hyperv/volumeops.py:120
#, python-format
msgid "Attach volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:130 nova/virt/xenapi/volumeops.py:113
+#: nova/virt/hyperv/volumeops.py:122
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:139
+#: nova/virt/hyperv/volumeops.py:136
#, python-format
msgid "Detach_volume: %(connection_info)s from %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:148
+#: nova/virt/hyperv/volumeops.py:145
#, python-format
msgid "Detaching physical disk from instance: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:159 nova/virt/libvirt/driver.py:644
+#: nova/virt/hyperv/volumeops.py:156 nova/virt/libvirt/driver.py:645
msgid "Could not determine iscsi initiator name"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:170 nova/virt/hyperv/volumeops.py:184
+#: nova/virt/hyperv/volumeops.py:167 nova/virt/hyperv/volumeops.py:181
#, python-format
msgid "Unable to find a mounted disk for target_iqn: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:173
+#: nova/virt/hyperv/volumeops.py:170
#, python-format
msgid "Device number: %(device_number)s, target lun: %(target_lun)s"
msgstr ""
@@ -7088,183 +7211,183 @@ msgstr ""
msgid "Unable to determine disk bus for '%s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:351
+#: nova/virt/libvirt/driver.py:352
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:357
+#: nova/virt/libvirt/driver.py:358
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:378
+#: nova/virt/libvirt/driver.py:379
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:400 nova/virt/libvirt/driver.py:403
+#: nova/virt/libvirt/driver.py:401 nova/virt/libvirt/driver.py:404
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:421
+#: nova/virt/libvirt/driver.py:422
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:509
+#: nova/virt/libvirt/driver.py:510
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:523
+#: nova/virt/libvirt/driver.py:524
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:528
+#: nova/virt/libvirt/driver.py:529
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:551
+#: nova/virt/libvirt/driver.py:552
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:566
+#: nova/virt/libvirt/driver.py:567
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:580
+#: nova/virt/libvirt/driver.py:581
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:598
+#: nova/virt/libvirt/driver.py:599
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:607
+#: nova/virt/libvirt/driver.py:608
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:759
+#: nova/virt/libvirt/driver.py:760
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:852
+#: nova/virt/libvirt/driver.py:853
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:855
+#: nova/virt/libvirt/driver.py:856
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:884
+#: nova/virt/libvirt/driver.py:885
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:896
+#: nova/virt/libvirt/driver.py:897
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:969
+#: nova/virt/libvirt/driver.py:970
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:973
+#: nova/virt/libvirt/driver.py:974
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:1008
+#: nova/virt/libvirt/driver.py:1009
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1016
+#: nova/virt/libvirt/driver.py:1017
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:1058
+#: nova/virt/libvirt/driver.py:1059
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1201
+#: nova/virt/libvirt/driver.py:1202
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1208 nova/virt/powervm/operator.py:272
+#: nova/virt/libvirt/driver.py:1209 nova/virt/powervm/operator.py:272
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1224
+#: nova/virt/libvirt/driver.py:1225
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1261 nova/virt/libvirt/driver.py:1287
+#: nova/virt/libvirt/driver.py:1262 nova/virt/libvirt/driver.py:1288
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:1276
+#: nova/virt/libvirt/driver.py:1277
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1345
+#: nova/virt/libvirt/driver.py:1346
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1349
+#: nova/virt/libvirt/driver.py:1350
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1353 nova/virt/libvirt/driver.py:1357
+#: nova/virt/libvirt/driver.py:1354 nova/virt/libvirt/driver.py:1358
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1423
+#: nova/virt/libvirt/driver.py:1424
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1520
+#: nova/virt/libvirt/driver.py:1521
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:1568
+#: nova/virt/libvirt/driver.py:1569
#, python-format
msgid "Injecting %(inj)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1577
+#: nova/virt/libvirt/driver.py:1578
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1635
+#: nova/virt/libvirt/driver.py:1636
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1641
+#: nova/virt/libvirt/driver.py:1642
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1645
+#: nova/virt/libvirt/driver.py:1646
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1649
+#: nova/virt/libvirt/driver.py:1650
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1665
+#: nova/virt/libvirt/driver.py:1666
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
@@ -7296,62 +7419,62 @@ msgid ""
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:2220
+#: nova/virt/libvirt/driver.py:2217
#, python-format
msgid "List of domains returned by libVirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2222
+#: nova/virt/libvirt/driver.py:2219
#, python-format
msgid "libVirt can't find a domain with id: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2304
+#: nova/virt/libvirt/driver.py:2301
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2387
+#: nova/virt/libvirt/driver.py:2384
#, python-format
msgid "Trying to get stats for the volume %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2411
+#: nova/virt/libvirt/driver.py:2408
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2415
+#: nova/virt/libvirt/driver.py:2412
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:2531
+#: nova/virt/libvirt/driver.py:2528
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2539
+#: nova/virt/libvirt/driver.py:2536
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2576
+#: nova/virt/libvirt/driver.py:2573
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2601
+#: nova/virt/libvirt/driver.py:2598
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2613
+#: nova/virt/libvirt/driver.py:2610
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -7361,62 +7484,62 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2630
+#: nova/virt/libvirt/driver.py:2627
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2678
+#: nova/virt/libvirt/driver.py:2675
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2750
+#: nova/virt/libvirt/driver.py:2747
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2843
+#: nova/virt/libvirt/driver.py:2840
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2953
+#: nova/virt/libvirt/driver.py:2950
#, python-format
msgid ""
"Error from libvirt while getting description of %(instance_name)s: [Error"
" Code %(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2970
+#: nova/virt/libvirt/driver.py:2967
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:3019
+#: nova/virt/libvirt/driver.py:3016
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3067
+#: nova/virt/libvirt/driver.py:3064
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:3126
+#: nova/virt/libvirt/driver.py:3123
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:3132
+#: nova/virt/libvirt/driver.py:3129
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:3188
+#: nova/virt/libvirt/driver.py:3185
msgid "Starting finish_revert_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:3301
+#: nova/virt/libvirt/driver.py:3298
#, python-format
msgid "Checking instance files accessability%(instance_path)s"
msgstr ""
@@ -7661,28 +7784,43 @@ msgstr ""
msgid "Failed while unplugging vif"
msgstr ""
-#: nova/virt/libvirt/volume.py:221
+#: nova/virt/libvirt/volume.py:224
#, python-format
msgid "iSCSI device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:224
+#: nova/virt/libvirt/volume.py:227
#, python-format
msgid ""
"ISCSI volume not yet found at: %(disk_dev)s. Will rescan & retry. Try "
"number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:236
+#: nova/virt/libvirt/volume.py:239
#, python-format
msgid "Found iSCSI node %(disk_dev)s (after %(tries)s rescans)"
msgstr ""
-#: nova/virt/libvirt/volume.py:309
+#: nova/virt/libvirt/volume.py:312
#, python-format
msgid "%s is already mounted"
msgstr ""
+#: nova/virt/libvirt/volume.py:368
+#, python-format
+msgid "AoE device not found at %s"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:370
+#, python-format
+msgid "AoE volume not yet found at: %(aoedevpath)s. Try number: %(tries)s"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:384
+#, python-format
+msgid "Found AoE device %(aoedevpath)s (after %(tries)s rediscover)"
+msgstr ""
+
#: nova/virt/libvirt/volume_nfs.py:36
msgid ""
"The nova.virt.libvirt.volume_nfs.NfsVolumeDriver class is deprecated and "
@@ -8074,57 +8212,57 @@ msgstr ""
msgid "Got total of %s instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:204
+#: nova/virt/vmwareapi/vmops.py:205
msgid "Creating VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:212
+#: nova/virt/vmwareapi/vmops.py:213
msgid "Created VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:236
+#: nova/virt/vmwareapi/vmops.py:237
#, python-format
msgid ""
"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter "
"type %(adapter_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:255
+#: nova/virt/vmwareapi/vmops.py:256
#, python-format
msgid ""
"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB and type "
"%(disk_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:264
+#: nova/virt/vmwareapi/vmops.py:265
#, python-format
msgid ""
"Deleting the file %(vmdk_path)s on the ESX host localstore "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:278
+#: nova/virt/vmwareapi/vmops.py:279
#, python-format
msgid ""
"Deleted the file %(vmdk_path)s on the ESX host local store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:286
+#: nova/virt/vmwareapi/vmops.py:287
#, python-format
msgid ""
"Downloading image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:306
+#: nova/virt/vmwareapi/vmops.py:307
#, python-format
msgid ""
"Downloaded image file data %(image_ref)s to %(upload_vmdk_name)s on the "
"ESX data store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:320
+#: nova/virt/vmwareapi/vmops.py:321
#, python-format
msgid ""
"Copying Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter type"
@@ -8132,283 +8270,283 @@ msgid ""
" type %(disk_type)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:341
+#: nova/virt/vmwareapi/vmops.py:342
#, python-format
msgid ""
"Copied Virtual Disk of size %(vmdk_file_size_in_kb)s KB and type "
"%(disk_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:420
+#: nova/virt/vmwareapi/vmops.py:421
msgid "Powering on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:426
+#: nova/virt/vmwareapi/vmops.py:427
msgid "Powered on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:472
+#: nova/virt/vmwareapi/vmops.py:473
msgid "Creating Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:482
+#: nova/virt/vmwareapi/vmops.py:483
msgid "Created Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:527
+#: nova/virt/vmwareapi/vmops.py:528
msgid "Copying disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:540
+#: nova/virt/vmwareapi/vmops.py:541
msgid "Copied disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:549
+#: nova/virt/vmwareapi/vmops.py:550
#, python-format
msgid "Uploading image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:563
+#: nova/virt/vmwareapi/vmops.py:564
#, python-format
msgid "Uploaded image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:576
+#: nova/virt/vmwareapi/vmops.py:577
#, python-format
msgid "Deleting temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:585
+#: nova/virt/vmwareapi/vmops.py:586
#, python-format
msgid "Deleted temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:617
+#: nova/virt/vmwareapi/vmops.py:618
msgid "instance is not powered on"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:624
+#: nova/virt/vmwareapi/vmops.py:625
msgid "Rebooting guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:627
+#: nova/virt/vmwareapi/vmops.py:628
msgid "Rebooted guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:629
+#: nova/virt/vmwareapi/vmops.py:630
msgid "Doing hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:633
+#: nova/virt/vmwareapi/vmops.py:634
msgid "Did hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:645 nova/virt/vmwareapi/vmops.py:677
-#: nova/virt/vmwareapi/vmops.py:970
+#: nova/virt/vmwareapi/vmops.py:646 nova/virt/vmwareapi/vmops.py:678
+#: nova/virt/vmwareapi/vmops.py:971
msgid "instance not present"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:651 nova/virt/vmwareapi/vmops.py:974
+#: nova/virt/vmwareapi/vmops.py:652 nova/virt/vmwareapi/vmops.py:975
msgid "Destroying the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:656 nova/virt/vmwareapi/vmops.py:979
+#: nova/virt/vmwareapi/vmops.py:657 nova/virt/vmwareapi/vmops.py:980
msgid "Destroyed the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:658
+#: nova/virt/vmwareapi/vmops.py:659
#, python-format
msgid "In vmwareapi:vmops:delete, got this exception while destroying the VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:696 nova/virt/vmwareapi/vmops.py:849
+#: nova/virt/vmwareapi/vmops.py:697 nova/virt/vmwareapi/vmops.py:850
msgid "Powering off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:701 nova/virt/vmwareapi/vmops.py:854
+#: nova/virt/vmwareapi/vmops.py:702 nova/virt/vmwareapi/vmops.py:855
msgid "Powered off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:705
+#: nova/virt/vmwareapi/vmops.py:706
msgid "Unregistering the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:708
+#: nova/virt/vmwareapi/vmops.py:709
msgid "Unregistered the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:710
+#: nova/virt/vmwareapi/vmops.py:711
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while un-registering the "
"VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:723
+#: nova/virt/vmwareapi/vmops.py:724
#, python-format
msgid "Deleting contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:735
+#: nova/virt/vmwareapi/vmops.py:736
#, python-format
msgid "Deleted contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:740
+#: nova/virt/vmwareapi/vmops.py:741
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
"contents from the disk: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:748
+#: nova/virt/vmwareapi/vmops.py:749
msgid "pause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:752
+#: nova/virt/vmwareapi/vmops.py:753
msgid "unpause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:766
+#: nova/virt/vmwareapi/vmops.py:767
msgid "Suspending the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:770
+#: nova/virt/vmwareapi/vmops.py:771
msgid "Suspended the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:773
+#: nova/virt/vmwareapi/vmops.py:774
msgid "instance is powered off and cannot be suspended."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:776
+#: nova/virt/vmwareapi/vmops.py:777
msgid "VM was already in suspended state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:789
+#: nova/virt/vmwareapi/vmops.py:790
msgid "Resuming the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:794
+#: nova/virt/vmwareapi/vmops.py:795
msgid "Resumed the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:796
+#: nova/virt/vmwareapi/vmops.py:797
msgid "instance is not in a suspended state"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:857
+#: nova/virt/vmwareapi/vmops.py:858
msgid "instance is suspended and cannot be powered off."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:860
+#: nova/virt/vmwareapi/vmops.py:861
msgid "VM was already in powered off state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:873
+#: nova/virt/vmwareapi/vmops.py:874
msgid "VM was already in powered on state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:877
+#: nova/virt/vmwareapi/vmops.py:878
msgid "Powering on the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:882
+#: nova/virt/vmwareapi/vmops.py:883
msgid "Powered on the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:899
+#: nova/virt/vmwareapi/vmops.py:900
#, python-format
msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:930
+#: nova/virt/vmwareapi/vmops.py:931
#, python-format
msgid "Renaming the VM to %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:936
+#: nova/virt/vmwareapi/vmops.py:937
#, python-format
msgid "Renamed the VM to %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:951
+#: nova/virt/vmwareapi/vmops.py:952
#, python-format
msgid "Cloning VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:959
+#: nova/virt/vmwareapi/vmops.py:960
#, python-format
msgid "Cloned VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:981
+#: nova/virt/vmwareapi/vmops.py:982
#, python-format
msgid ""
"In vmwareapi:vmops:confirm_migration, got this exception while destroying"
" the VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:996
+#: nova/virt/vmwareapi/vmops.py:997
#, python-format
msgid "Renaming the VM from %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1002
+#: nova/virt/vmwareapi/vmops.py:1003
#, python-format
msgid "Renamed the VM from %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1025
+#: nova/virt/vmwareapi/vmops.py:1026
#, python-format
msgid "Migrating VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1037
+#: nova/virt/vmwareapi/vmops.py:1038
#, python-format
msgid "Migrated VM to host %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1047 nova/virt/xenapi/vmops.py:1263
+#: nova/virt/vmwareapi/vmops.py:1048 nova/virt/xenapi/vmops.py:1263
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1051
+#: nova/virt/vmwareapi/vmops.py:1052
#, python-format
msgid "Automatically hard rebooting %d"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1087
+#: nova/virt/vmwareapi/vmops.py:1088
msgid "get_diagnostics not implemented for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1163
+#: nova/virt/vmwareapi/vmops.py:1164
#, python-format
msgid "Reconfiguring VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1170
+#: nova/virt/vmwareapi/vmops.py:1171
#, python-format
msgid "Reconfigured VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1185
+#: nova/virt/vmwareapi/vmops.py:1186
#, python-format
msgid "Reconfiguring VM instance to enable vnc on port - %(port)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1192
+#: nova/virt/vmwareapi/vmops.py:1193
#, python-format
msgid "Reconfigured VM instance to enable vnc on port - %(port)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1285
+#: nova/virt/vmwareapi/vmops.py:1286
#, python-format
msgid "Creating directory with path %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1291
+#: nova/virt/vmwareapi/vmops.py:1292
#, python-format
msgid "Created directory with path %s"
msgstr ""
@@ -8453,7 +8591,7 @@ msgstr ""
msgid "Rescanned HBA %s "
msgstr ""
-#: nova/virt/vmwareapi/volume_util.py:178 nova/virt/xenapi/volume_utils.py:348
+#: nova/virt/vmwareapi/volume_util.py:178 nova/virt/xenapi/volume_utils.py:267
#, python-format
msgid "Mountpoint cannot be translated: %s"
msgstr ""
@@ -8501,7 +8639,7 @@ msgstr ""
msgid "Unable to discovered iSCSI target %(target_iqn)s from %(target_portal)s."
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:130 nova/virt/xenapi/volumeops.py:45
+#: nova/virt/vmwareapi/volumeops.py:130 nova/virt/xenapi/volumeops.py:46
#, python-format
msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
msgstr ""
@@ -8510,12 +8648,12 @@ msgstr ""
msgid "Unable to find iSCSI Target"
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:159 nova/virt/xenapi/volumeops.py:58
+#: nova/virt/vmwareapi/volumeops.py:159 nova/virt/xenapi/volumeops.py:59
#, python-format
msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:169 nova/virt/xenapi/volumeops.py:118
+#: nova/virt/vmwareapi/volumeops.py:169 nova/virt/xenapi/volumeops.py:102
#, python-format
msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
msgstr ""
@@ -8524,7 +8662,7 @@ msgstr ""
msgid "Unable to find volume"
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:190 nova/virt/xenapi/volumeops.py:131
+#: nova/virt/vmwareapi/volumeops.py:190 nova/virt/xenapi/volumeops.py:122
#, python-format
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
@@ -8876,6 +9014,11 @@ msgstr ""
msgid "SR not present and could not be introduced"
msgstr ""
+#: nova/virt/xenapi/vm_utils.py:496
+#, python-format
+msgid "block device info: %s"
+msgstr ""
+
#: nova/virt/xenapi/vm_utils.py:582
#, python-format
msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s"
@@ -9416,149 +9559,75 @@ msgstr ""
msgid "Migrate Send failed"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:42
-msgid "creating sr within volume_utils"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:45 nova/virt/xenapi/volume_utils.py:74
+#: nova/virt/xenapi/volume_utils.py:51
#, python-format
-msgid "type is = %s"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:48 nova/virt/xenapi/volume_utils.py:77
-#, python-format
-msgid "name = %s"
+msgid "Creating SR %(label)s"
msgstr ""
#: nova/virt/xenapi/volume_utils.py:61
#, python-format
-msgid "Created %(label)s as %(sr_ref)s."
+msgid "Introducing SR %(label)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:66 nova/virt/xenapi/volume_utils.py:165
-msgid "Unable to create Storage Repository"
+#: nova/virt/xenapi/volume_utils.py:68
+msgid "Creating PBD for SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:70
-msgid "introducing sr within volume_utils"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:94 nova/virt/xenapi/volume_utils.py:161
-#: nova/virt/xenapi/volumeops.py:72
-#, python-format
-msgid "Introduced %(label)s as %(sr_ref)s."
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:97
-msgid "Creating pbd for SR"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:99
+#: nova/virt/xenapi/volume_utils.py:71
msgid "Plugging SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:107 nova/virt/xenapi/volumeops.py:76
-msgid "Unable to introduce Storage Repository"
+#: nova/virt/xenapi/volume_utils.py:82
+msgid "Forgetting SR..."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:118
-msgid "Unable to get SR using uuid"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:120
-#, python-format
-msgid "Forgetting SR %s..."
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:128
-msgid "Unable to forget Storage Repository"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:148
-#, python-format
-msgid "Introducing %s..."
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:177
+#: nova/virt/xenapi/volume_utils.py:104
#, python-format
msgid "Unable to find SR from VBD %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:195
+#: nova/virt/xenapi/volume_utils.py:121
#, python-format
msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:201
+#: nova/virt/xenapi/volume_utils.py:129
#, python-format
msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:225
+#: nova/virt/xenapi/volume_utils.py:153
#, python-format
msgid "Unable to introduce VDI on SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:233
+#: nova/virt/xenapi/volume_utils.py:161
#, python-format
msgid "Unable to get record of VDI %s on"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:255
+#: nova/virt/xenapi/volume_utils.py:183
#, python-format
msgid "Unable to introduce VDI for SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:265
-#, python-format
-msgid "Error finding vdis in SR %s"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:272
-#, python-format
-msgid "Unable to find vbd for vdi %s"
+#: nova/virt/xenapi/volume_utils.py:193
+msgid "Cannot purge SR with referenced VDIs"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:283
+#: nova/virt/xenapi/volume_utils.py:202
#, python-format
msgid "Unable to obtain target information %(mountpoint)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:322
+#: nova/virt/xenapi/volume_utils.py:241
#, python-format
msgid "Unable to obtain target information %(connection_data)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:388
-#, python-format
-msgid "Introducing SR %s"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:391
-msgid "SR found in xapi database. No need to introduce"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:396
-msgid "Could not introduce SR"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:403
-#, python-format
-msgid "SR %s not found in the xapi database"
-msgstr ""
-
-#: nova/virt/xenapi/volume_utils.py:409
-msgid "Could not forget SR"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:94
-#, python-format
-msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:104
+#: nova/virt/xenapi/volumeops.py:114
#, python-format
-msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
+msgid "Skipping detach because VBD for %(instance_name)s was not found"
msgstr ""
#: nova/virt/xenapi/imageupload/glance.py:34
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 08a2ae354..eb3560179 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -23,6 +23,7 @@ import calendar
import inspect
import netaddr
import os
+import re
from nova import db
from nova import exception
@@ -61,9 +62,14 @@ linux_net_opts = [
cfg.IntOpt('dhcp_lease_time',
default=120,
help='Lifetime of a DHCP lease in seconds'),
- cfg.StrOpt('dns_server',
- default=None,
- help='if set, uses specific dns server for dnsmasq'),
+ cfg.MultiStrOpt('dns_server',
+ default=[],
+ help='if set, uses specific dns server for dnsmasq. Can'
+ 'be specified multiple times.'),
+ cfg.BoolOpt('use_network_dns_servers',
+ default=False,
+ help='if set, uses the dns1 and dns2 from the network ref.'
+ 'as dns servers.'),
cfg.ListOpt('dmz_cidr',
default=[],
help='A list of dmz range that should be accepted'),
@@ -97,6 +103,14 @@ linux_net_opts = [
cfg.IntOpt('metadata_port',
default=8775,
help='the port for the metadata api port'),
+ cfg.StrOpt('iptables_top_regex',
+ default='',
+ help='Regular expression to match iptables rule that should'
+ 'always be on the top.'),
+ cfg.StrOpt('iptables_bottom_regex',
+ default='',
+ help='Regular expression to match iptables rule that should'
+ 'always be on the bottom.'),
]
CONF = cfg.CONF
@@ -420,6 +434,25 @@ class IptablesManager(object):
new_filter = filter(lambda line: binary_name not in line,
current_lines)
+ top_rules = []
+ bottom_rules = []
+
+ if CONF.iptables_top_regex:
+ regex = re.compile(CONF.iptables_top_regex)
+ temp_filter = filter(lambda line: regex.search(line), new_filter)
+ for rule_str in temp_filter:
+ new_filter = filter(lambda s: s.strip() != rule_str.strip(),
+ new_filter)
+ top_rules = temp_filter
+
+ if CONF.iptables_bottom_regex:
+ regex = re.compile(CONF.iptables_bottom_regex)
+ temp_filter = filter(lambda line: regex.search(line), new_filter)
+ for rule_str in temp_filter:
+ new_filter = filter(lambda s: s.strip() != rule_str.strip(),
+ new_filter)
+ bottom_rules = temp_filter
+
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(new_filter):
@@ -433,7 +466,7 @@ class IptablesManager(object):
if not seen_chains:
rules_index = 2
- our_rules = []
+ our_rules = top_rules
bot_rules = []
for rule in rules:
rule_str = str(rule)
@@ -479,6 +512,8 @@ class IptablesManager(object):
(binary_name, name,)
for name in chains]
+ commit_index = new_filter.index('COMMIT')
+ new_filter[commit_index:commit_index] = bottom_rules
seen_lines = set()
def _weed_out_duplicates(line):
@@ -961,11 +996,21 @@ def restart_dhcp(context, dev, network_ref):
'--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % CONF.dhcpbridge,
'--leasefile-ro']
- if network_ref['multi_host'] and not CONF.dns_server:
- cmd += ['--no-hosts', '--addn-hosts=%s' % _dhcp_file(dev, 'hosts')]
- if CONF.dns_server:
- cmd += ['-h', '-R', '--server=%s' % CONF.dns_server]
+ dns_servers = set(CONF.dns_server)
+ if CONF.use_network_dns_servers:
+ if network_ref.get('dns1'):
+ dns_servers.add(network_ref.get('dns1'))
+ if network_ref.get('dns2'):
+ dns_servers.add(network_ref.get('dns2'))
+ if network_ref['multi_host'] or dns_servers:
+ cmd.append('--no-hosts')
+ if network_ref['multi_host']:
+ '--addn-hosts=%s' % _dhcp_file(dev, 'hosts')
+ if dns_servers:
+ cmd.append('--no-resolv')
+ for dns_server in dns_servers:
+ cmd.append('--server=%s' % dns_server)
if CONF.use_single_default_gateway:
cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')]
diff --git a/nova/network/minidns.py b/nova/network/minidns.py
index c565f368e..dc9dc6f17 100644
--- a/nova/network/minidns.py
+++ b/nova/network/minidns.py
@@ -168,7 +168,6 @@ class MiniDNS(dns_driver.DNSDriver):
return entries
def delete_dns_file(self):
- LOG.warn(_("This shouldn't be getting called except during testing."))
if os.path.exists(self.filename):
try:
os.remove(self.filename)
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 420396ed4..ee4ceb9cd 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -260,19 +260,20 @@ class API(base.Base):
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
- fixed_ips = p['fixed_ips']
for subnet in ipam_subnets:
- fixed_ip = {'subnet_id': subnet['id']}
- fixed_ips.append(fixed_ip)
- port_req_body = {'port': {'fixed_ips': fixed_ips}}
- try:
- quantumv2.get_client(context).update_port(p['id'],
- port_req_body)
- except Exception as ex:
- msg = _("Unable to update port %(portid)s with"
- " failure: %(exception)s")
- LOG.debug(msg, {'portid': p['id'], 'exception': ex})
- return
+ fixed_ips = [{'subnet_id': subnet['id']}]
+ port_req_body = {'port': {'fixed_ips': fixed_ips}}
+ try:
+ quantumv2.get_client(context).update_port(p['id'],
+ port_req_body)
+ return
+ except Exception as ex:
+ msg = _("Unable to update port %(portid)s on subnet "
+ "%(subnet_id)s with failure: %(exception)s")
+ LOG.debug(msg, {'portid': p['id'],
+ 'subnet_id': subnet['id'],
+ 'exception': ex})
+
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
@@ -669,6 +670,8 @@ class API(base.Base):
bridge = None
ovs_interfaceid = None
+ # Network model metadata
+ should_create_bridge = None
vif_type = port.get('binding:vif_type')
# TODO(berrange) Quantum should pass the bridge name
# in another binding metadata field
@@ -677,6 +680,7 @@ class API(base.Base):
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = "brq" + port['network_id']
+ should_create_bridge = True
if bridge is not None:
bridge = bridge[:network_model.NIC_NAME_LEN]
@@ -692,6 +696,8 @@ class API(base.Base):
tenant_id=net['tenant_id']
)
network['subnets'] = subnets
+ if should_create_bridge is not None:
+ network['should_create_bridge'] = should_create_bridge
nw_info.append(network_model.VIF(
id=port['id'],
address=port['mac_address'],
diff --git a/nova/openstack/__init__.py b/nova/openstack/__init__.py
index 0a3b98867..e69de29bb 100644
--- a/nova/openstack/__init__.py
+++ b/nova/openstack/__init__.py
@@ -1,15 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/nova/openstack/common/__init__.py b/nova/openstack/common/__init__.py
index 0a3b98867..e69de29bb 100644
--- a/nova/openstack/common/__init__.py
+++ b/nova/openstack/common/__init__.py
@@ -1,15 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/nova/openstack/common/cfg.py b/nova/openstack/common/cfg.py
index 534a610c0..baecc0c3b 100644
--- a/nova/openstack/common/cfg.py
+++ b/nova/openstack/common/cfg.py
@@ -863,7 +863,7 @@ class SubCommandOpt(Opt):
description=self.description,
help=self.help)
- if not self.handler is None:
+ if self.handler is not None:
self.handler(subparsers)
@@ -1297,6 +1297,24 @@ class ConfigOpts(collections.Mapping):
__import__(module_str)
self._get_opt_info(name, group)
+ def import_group(self, group, module_str):
+ """Import an option group from a module.
+
+ Import a module and check that a given option group is registered.
+
+ This is intended for use with global configuration objects
+ like cfg.CONF where modules commonly register options with
+ CONF at module load time. If one module requires an option group
+ defined by another module it can use this method to explicitly
+ declare the dependency.
+
+ :param group: an option OptGroup object or group name
+ :param module_str: the name of a module to import
+ :raises: ImportError, NoSuchGroupError
+ """
+ __import__(module_str)
+ self._get_group(group)
+
@__clear_cache
def set_override(self, name, override, group=None):
"""Override an opt value.
@@ -1547,8 +1565,8 @@ class ConfigOpts(collections.Mapping):
group = group_or_name if isinstance(group_or_name, OptGroup) else None
group_name = group.name if group else group_or_name
- if not group_name in self._groups:
- if not group is None or not autocreate:
+ if group_name not in self._groups:
+ if group is not None or not autocreate:
raise NoSuchGroupError(group_name)
self.register_group(OptGroup(name=group_name))
@@ -1568,7 +1586,7 @@ class ConfigOpts(collections.Mapping):
group = self._get_group(group)
opts = group._opts
- if not opt_name in opts:
+ if opt_name not in opts:
raise NoSuchOptError(opt_name, group)
return opts[opt_name]
@@ -1606,7 +1624,7 @@ class ConfigOpts(collections.Mapping):
opt = info['opt']
if opt.required:
- if ('default' in info or 'override' in info):
+ if 'default' in info or 'override' in info:
continue
if self._get(opt.dest, group) is None:
@@ -1625,7 +1643,7 @@ class ConfigOpts(collections.Mapping):
"""
self._args = args
- for opt, group in self._all_cli_opts():
+ for opt, group in sorted(self._all_cli_opts()):
opt._add_to_cli(self._oparser, group)
return vars(self._oparser.parse_args(args))
diff --git a/nova/openstack/common/cliutils.py b/nova/openstack/common/cliutils.py
index 8f4dc44dd..411bd58f3 100644
--- a/nova/openstack/common/cliutils.py
+++ b/nova/openstack/common/cliutils.py
@@ -15,7 +15,6 @@
# under the License.
import inspect
-import string
class MissingArgs(Exception):
@@ -25,12 +24,10 @@ class MissingArgs(Exception):
def __str__(self):
if len(self.missing) == 1:
- return ("An argument is missing: %(missing)s" %
- dict(missing=self.missing[0]))
+ return "An argument is missing"
else:
- return ("%(num)d arguments are missing: %(missing)s" %
- dict(num=len(self.missing),
- missing=string.join(self.missing, ', ')))
+ return ("%(num)d arguments are missing" %
+ dict(num=len(self.missing)))
def validate_args(fn, *args, **kwargs):
@@ -39,11 +36,11 @@ def validate_args(fn, *args, **kwargs):
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
- MissingArgs: An argument is missing: a
+ MissingArgs: An argument is missing
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
- MissingArgs: 2 arguments are missing: b, d
+ MissingArgs: 2 arguments are missing
:param fn: the function to check
:param arg: the positional arguments supplied
diff --git a/nova/openstack/common/excutils.py b/nova/openstack/common/excutils.py
index 5dd483017..e507efced 100644
--- a/nova/openstack/common/excutils.py
+++ b/nova/openstack/common/excutils.py
@@ -24,6 +24,8 @@ import logging
import sys
import traceback
+from nova.openstack.common.gettextutils import _
+
@contextlib.contextmanager
def save_and_reraise_exception():
@@ -43,7 +45,7 @@ def save_and_reraise_exception():
try:
yield
except Exception:
- logging.error('Original exception being dropped: %s' %
- (traceback.format_exception(type_, value, tb)))
+ logging.error(_('Original exception being dropped: %s'),
+ traceback.format_exception(type_, value, tb))
raise
raise type_, value, tb
diff --git a/nova/openstack/common/jsonutils.py b/nova/openstack/common/jsonutils.py
index 290435450..b06055117 100644
--- a/nova/openstack/common/jsonutils.py
+++ b/nova/openstack/common/jsonutils.py
@@ -38,13 +38,17 @@ import functools
import inspect
import itertools
import json
+import logging
import xmlrpclib
+from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
+LOG = logging.getLogger(__name__)
+
def to_primitive(value, convert_instances=False, convert_datetime=True,
- level=0):
+ level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
@@ -80,7 +84,9 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
if getattr(value, '__module__', None) == 'mox':
return 'mock'
- if level > 3:
+ if level > max_depth:
+ LOG.error(_('Max serialization depth exceeded on object: %d %s'),
+ level, value)
return '?'
# The try block may not be necessary after the class check above,
@@ -89,7 +95,8 @@ def to_primitive(value, convert_instances=False, convert_datetime=True,
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
- level=level)
+ level=level,
+ max_depth=max_depth)
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
diff --git a/nova/openstack/common/plugin/pluginmanager.py b/nova/openstack/common/plugin/pluginmanager.py
index 51d06d851..157ecbf44 100644
--- a/nova/openstack/common/plugin/pluginmanager.py
+++ b/nova/openstack/common/plugin/pluginmanager.py
@@ -16,6 +16,7 @@
import pkg_resources
from nova.openstack.common import cfg
+from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
diff --git a/nova/openstack/common/policy.py b/nova/openstack/common/policy.py
index 496ed972d..626a61aa5 100644
--- a/nova/openstack/common/policy.py
+++ b/nova/openstack/common/policy.py
@@ -574,19 +574,19 @@ class ParseState(object):
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
- self.tokens[-len(reduction):] == reduction):
- # Get the reduction method
- meth = getattr(self, methname)
+ self.tokens[-len(reduction):] == reduction):
+ # Get the reduction method
+ meth = getattr(self, methname)
- # Reduce the token stream
- results = meth(*self.values[-len(reduction):])
+ # Reduce the token stream
+ results = meth(*self.values[-len(reduction):])
- # Update the tokens and values
- self.tokens[-len(reduction):] = [r[0] for r in results]
- self.values[-len(reduction):] = [r[1] for r in results]
+ # Update the tokens and values
+ self.tokens[-len(reduction):] = [r[0] for r in results]
+ self.values[-len(reduction):] = [r[1] for r in results]
- # Check for any more reductions
- return self.reduce()
+ # Check for any more reductions
+ return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index 2c0631548..d562b932f 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -772,8 +772,9 @@ def _get_ctxt():
return ZMQ_CTX
-def _get_matchmaker():
+def _get_matchmaker(*args, **kwargs):
global matchmaker
if not matchmaker:
- matchmaker = importutils.import_object(CONF.rpc_zmq_matchmaker)
+ matchmaker = importutils.import_object(
+ CONF.rpc_zmq_matchmaker, *args, **kwargs)
return matchmaker
diff --git a/nova/openstack/common/rpc/matchmaker.py b/nova/openstack/common/rpc/matchmaker.py
index 8b2c67a44..9eec1df83 100644
--- a/nova/openstack/common/rpc/matchmaker.py
+++ b/nova/openstack/common/rpc/matchmaker.py
@@ -201,24 +201,25 @@ class FanoutRingExchange(RingExchange):
class LocalhostExchange(Exchange):
"""Exchange where all direct topics are local."""
- def __init__(self):
+ def __init__(self, host='localhost'):
+ self.host = host
super(Exchange, self).__init__()
def run(self, key):
- return [(key.split('.')[0] + '.localhost', 'localhost')]
+ return [('.'.join((key.split('.')[0], self.host)), self.host)]
class DirectExchange(Exchange):
"""
Exchange where all topic keys are split, sending to second half.
- i.e. "compute.host" sends a message to "compute" running on "host"
+ i.e. "compute.host" sends a message to "compute.host" running on "host"
"""
def __init__(self):
super(Exchange, self).__init__()
def run(self, key):
- b, e = key.split('.', 1)
- return [(b, e)]
+ e = key.split('.', 1)[1]
+ return [(key, e)]
class MatchMakerRing(MatchMakerBase):
@@ -237,11 +238,11 @@ class MatchMakerLocalhost(MatchMakerBase):
Match Maker where all bare topics resolve to localhost.
Useful for testing.
"""
- def __init__(self):
+ def __init__(self, host='localhost'):
super(MatchMakerLocalhost, self).__init__()
- self.add_binding(FanoutBinding(), LocalhostExchange())
+ self.add_binding(FanoutBinding(), LocalhostExchange(host))
self.add_binding(DirectBinding(), DirectExchange())
- self.add_binding(TopicBinding(), LocalhostExchange())
+ self.add_binding(TopicBinding(), LocalhostExchange(host))
class MatchMakerStub(MatchMakerBase):
diff --git a/nova/openstack/common/setup.py b/nova/openstack/common/setup.py
index e6f72f034..35680b304 100644
--- a/nova/openstack/common/setup.py
+++ b/nova/openstack/common/setup.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
+# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,7 +20,7 @@
Utilities with minimum-depends for use in setup.py
"""
-import datetime
+import email
import os
import re
import subprocess
@@ -33,11 +34,12 @@ def parse_mailmap(mailmap='.mailmap'):
if os.path.exists(mailmap):
with open(mailmap, 'r') as fp:
for l in fp:
- l = l.strip()
- if not l.startswith('#') and ' ' in l:
- canonical_email, alias = [x for x in l.split(' ')
- if x.startswith('<')]
- mapping[alias] = canonical_email
+ try:
+ canonical_email, alias = re.match(
+ r'[^#]*?(<.+>).*(<.+>).*', l).groups()
+ except AttributeError:
+ continue
+ mapping[alias] = canonical_email
return mapping
@@ -45,8 +47,8 @@ def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
- for alias, email in mapping.iteritems():
- changelog = changelog.replace(alias, email)
+ for alias, email_address in mapping.iteritems():
+ changelog = changelog.replace(alias, email_address)
return changelog
@@ -106,23 +108,17 @@ def parse_dependency_links(requirements_files=['requirements.txt',
return dependency_links
-def write_requirements():
- venv = os.environ.get('VIRTUAL_ENV', None)
- if venv is not None:
- with open("requirements.txt", "w") as req_file:
- output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
- stdout=subprocess.PIPE)
- requirements = output.communicate()[0].strip()
- req_file.write(requirements)
-
-
-def _run_shell_command(cmd):
+def _run_shell_command(cmd, throw_on_error=False):
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ if output.returncode and throw_on_error:
+ raise Exception("%s returned %d" % cmd, output.returncode)
out = output.communicate()
if len(out) == 0:
return None
@@ -131,57 +127,6 @@ def _run_shell_command(cmd):
return out[0].strip()
-def _get_git_next_version_suffix(branch_name):
- datestamp = datetime.datetime.now().strftime('%Y%m%d')
- if branch_name == 'milestone-proposed':
- revno_prefix = "r"
- else:
- revno_prefix = ""
- _run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
- milestone_cmd = "git show meta/openstack/release:%s" % branch_name
- milestonever = _run_shell_command(milestone_cmd)
- if milestonever:
- first_half = "%s~%s" % (milestonever, datestamp)
- else:
- first_half = datestamp
-
- post_version = _get_git_post_version()
- # post version should look like:
- # 0.1.1.4.gcc9e28a
- # where the bit after the last . is the short sha, and the bit between
- # the last and second to last is the revno count
- (revno, sha) = post_version.split(".")[-2:]
- second_half = "%s%s.%s" % (revno_prefix, revno, sha)
- return ".".join((first_half, second_half))
-
-
-def _get_git_current_tag():
- return _run_shell_command("git tag --contains HEAD")
-
-
-def _get_git_tag_info():
- return _run_shell_command("git describe --tags")
-
-
-def _get_git_post_version():
- current_tag = _get_git_current_tag()
- if current_tag is not None:
- return current_tag
- else:
- tag_info = _get_git_tag_info()
- if tag_info is None:
- base_version = "0.0"
- cmd = "git --no-pager log --oneline"
- out = _run_shell_command(cmd)
- revno = len(out.split("\n"))
- sha = _run_shell_command("git describe --always")
- else:
- tag_infos = tag_info.split("-")
- base_version = "-".join(tag_infos[:-2])
- (revno, sha) = tag_infos[-2:]
- return "%s.%s.%s" % (base_version, revno, sha)
-
-
def write_git_changelog():
"""Write a changelog based on the git changelog."""
new_changelog = 'ChangeLog'
@@ -227,26 +172,6 @@ _rst_template = """%(heading)s
"""
-def read_versioninfo(project):
- """Read the versioninfo file. If it doesn't exist, we're in a github
- zipball, and there's really no way to know what version we really
- are, but that should be ok, because the utility of that should be
- just about nil if this code path is in use in the first place."""
- versioninfo_path = os.path.join(project, 'versioninfo')
- if os.path.exists(versioninfo_path):
- with open(versioninfo_path, 'r') as vinfo:
- version = vinfo.read().strip()
- else:
- version = "0.0.0"
- return version
-
-
-def write_versioninfo(project, version):
- """Write a simple file containing the version of the package."""
- with open(os.path.join(project, 'versioninfo'), 'w') as fil:
- fil.write("%s\n" % version)
-
-
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
@@ -276,6 +201,9 @@ def get_cmdclass():
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
+
+ builders = ['html', 'man']
+
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
@@ -311,56 +239,97 @@ def get_cmdclass():
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
- for builder in ['html', 'man']:
+ for builder in self.builders:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
+
+ class LocalBuildLatex(LocalBuildDoc):
+ builders = ['latex']
+
cmdclass['build_sphinx'] = LocalBuildDoc
+ cmdclass['build_sphinx_latex'] = LocalBuildLatex
except ImportError:
pass
return cmdclass
-def get_git_branchname():
- for branch in _run_shell_command("git branch --color=never").split("\n"):
- if branch.startswith('*'):
- _branch_name = branch.split()[1].strip()
- if _branch_name == "(no":
- _branch_name = "no-branch"
- return _branch_name
+def _get_revno():
+ """Return the number of commits since the most recent tag.
+ We use git-describe to find this out, but if there are no
+ tags then we fall back to counting commits since the beginning
+ of time.
+ """
+ describe = _run_shell_command("git describe --always")
+ if "-" in describe:
+ return describe.rsplit("-", 2)[-2]
-def get_pre_version(projectname, base_version):
- """Return a version which is leading up to a version that will
- be released in the future."""
- if os.path.isdir('.git'):
- current_tag = _get_git_current_tag()
- if current_tag is not None:
- version = current_tag
- else:
- branch_name = os.getenv('BRANCHNAME',
- os.getenv('GERRIT_REFNAME',
- get_git_branchname()))
- version_suffix = _get_git_next_version_suffix(branch_name)
- version = "%s~%s" % (base_version, version_suffix)
- write_versioninfo(projectname, version)
- return version
- else:
- version = read_versioninfo(projectname)
- return version
+ # no tags found
+ revlist = _run_shell_command("git rev-list --abbrev-commit HEAD")
+ return len(revlist.splitlines())
-def get_post_version(projectname):
+def _get_version_from_git(pre_version):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
- version = _get_git_post_version()
- write_versioninfo(projectname, version)
+ if pre_version:
+ try:
+ return _run_shell_command(
+ "git describe --exact-match",
+ throw_on_error=True).replace('-', '.')
+ except Exception:
+ sha = _run_shell_command("git log -n1 --pretty=format:%h")
+ return "%s.a%s.g%s" % (pre_version, _get_revno(), sha)
+ else:
+ return _run_shell_command(
+ "git describe --always").replace('-', '.')
+ return None
+
+
+def _get_version_from_pkg_info(package_name):
+ """Get the version from PKG-INFO file if we can."""
+ try:
+ pkg_info_file = open('PKG-INFO', 'r')
+ except (IOError, OSError):
+ return None
+ try:
+ pkg_info = email.message_from_file(pkg_info_file)
+ except email.MessageError:
+ return None
+ # Check to make sure we're in our own dir
+ if pkg_info.get('Name', None) != package_name:
+ return None
+ return pkg_info.get('Version', None)
+
+
+def get_version(package_name, pre_version=None):
+ """Get the version of the project. First, try getting it from PKG-INFO, if
+ it exists. If it does, that means we're in a distribution tarball or that
+ install has happened. Otherwise, if there is no PKG-INFO file, pull the
+ version from git.
+
+ We do not support setup.py version sanity in git archive tarballs, nor do
+ we support packagers directly sucking our git repo into theirs. We expect
+ that a source tarball be made from our git repo - or that if someone wants
+ to make a source tarball from a fork of our repo with additional tags in it
+ that they understand and desire the results of doing that.
+ """
+ version = os.environ.get("OSLO_PACKAGE_VERSION", None)
+ if version:
+ return version
+ version = _get_version_from_pkg_info(package_name)
+ if version:
+ return version
+ version = _get_version_from_git(pre_version)
+ if version:
return version
- return read_versioninfo(projectname)
+ raise Exception("Versioning for this project requires either an sdist"
+ " tarball, or access to an upstream git repository.")
diff --git a/nova/openstack/common/timeutils.py b/nova/openstack/common/timeutils.py
index 0f346087f..e2c274057 100644
--- a/nova/openstack/common/timeutils.py
+++ b/nova/openstack/common/timeutils.py
@@ -98,6 +98,11 @@ def utcnow():
return datetime.datetime.utcnow()
+def iso8601_from_timestamp(timestamp):
+ """Returns a iso8601 formated date from timestamp"""
+ return isotime(datetime.datetime.utcfromtimestamp(timestamp))
+
+
utcnow.override_time = None
@@ -162,3 +167,16 @@ def delta_seconds(before, after):
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
+
+
+def is_soon(dt, window):
+ """
+ Determines if time is going to happen in the next window seconds.
+
+ :params dt: the time
+ :params window: minimum seconds to remain to consider the time not soon
+
+ :return: True if expiration is within the given duration
+ """
+ soon = (utcnow() + datetime.timedelta(seconds=window))
+ return normalize_time(dt) <= soon
diff --git a/nova/openstack/common/version.py b/nova/openstack/common/version.py
new file mode 100644
index 000000000..57803b3d5
--- /dev/null
+++ b/nova/openstack/common/version.py
@@ -0,0 +1,94 @@
+
+# Copyright 2012 OpenStack LLC
+# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utilities for consuming the version from pkg_resources.
+"""
+
+import pkg_resources
+
+
+class VersionInfo(object):
+
+ def __init__(self, package):
+ """Object that understands versioning for a package
+ :param package: name of the python package, such as glance, or
+ python-glanceclient
+ """
+ self.package = package
+ self.release = None
+ self.version = None
+ self._cached_version = None
+
+ def __str__(self):
+ """Make the VersionInfo object behave like a string."""
+ return self.version_string()
+
+ def __repr__(self):
+ """Include the name."""
+ return "VersionInfo(%s:%s)" % (self.package, self.version_string())
+
+ def _get_version_from_pkg_resources(self):
+ """Get the version of the package from the pkg_resources record
+ associated with the package."""
+ try:
+ requirement = pkg_resources.Requirement.parse(self.package)
+ provider = pkg_resources.get_provider(requirement)
+ return provider.version
+ except pkg_resources.DistributionNotFound:
+ # The most likely cause for this is running tests in a tree
+ # produced from a tarball where the package itself has not been
+ # installed into anything. Revert to setup-time logic.
+ from nova.openstack.common import setup
+ return setup.get_version(self.package)
+
+ def release_string(self):
+ """Return the full version of the package including suffixes indicating
+ VCS status.
+ """
+ if self.release is None:
+ self.release = self._get_version_from_pkg_resources()
+
+ return self.release
+
+ def version_string(self):
+ """Return the short version minus any alpha/beta tags."""
+ if self.version is None:
+ parts = []
+ for part in self.release_string().split('.'):
+ if part[0].isdigit():
+ parts.append(part)
+ else:
+ break
+ self.version = ".".join(parts)
+
+ return self.version
+
+ # Compatibility functions
+ canonical_version_string = version_string
+ version_string_with_vcs = release_string
+
+ def cached_version_string(self, prefix=""):
+ """Generate an object which will expand in a string context to
+ the results of version_string(). We do this so that don't
+ call into pkg_resources every time we start up a program when
+ passing version information into the CONF constructor, but
+ rather only do the calculation when and if a version is requested
+ """
+ if not self._cached_version:
+ self._cached_version = "%s%s" % (prefix,
+ self.version_string())
+ return self._cached_version
diff --git a/nova/policy.py b/nova/policy.py
index 27e261eac..ac2f2e730 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -101,14 +101,15 @@ def enforce(context, action, target, do_raise=True):
return policy.check(action, target, credentials, **extra)
-def check_is_admin(roles):
+def check_is_admin(context):
"""Whether or not roles contains 'admin' role according to policy setting.
"""
init()
- target = {}
- credentials = {'roles': roles}
+ #the target is user-self
+ credentials = context.to_dict()
+ target = credentials
return policy.check('context_is_admin', target, credentials)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 226e31bba..2565e4e40 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -82,13 +82,16 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
'scheduler.run_instance', notifier.ERROR, payload)
-def instance_update_db(context, instance_uuid):
+def instance_update_db(context, instance_uuid, extra_values=None):
'''Clear the host and node - set the scheduled_at field of an Instance.
:returns: An Instance with the updated fields set properly.
'''
now = timeutils.utcnow()
values = {'host': None, 'node': None, 'scheduled_at': now}
+ if extra_values:
+ values.update(extra_values)
+
return db.instance_update(context, instance_uuid, values)
@@ -132,6 +135,16 @@ class Scheduler(object):
for service in services
if self.servicegroup_api.service_is_up(service)]
+ def group_hosts(self, context, group):
+ """Return the list of hosts that have VM's from the group."""
+
+ # The system_metadata 'group' will be filtered
+ members = db.instance_get_all_by_filters(context,
+ {'deleted': False, 'group': group})
+ return [member['host']
+ for member in members
+ if member.get('host') is not None]
+
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations):
@@ -209,7 +222,10 @@ class Scheduler(object):
"""
# Checking dest exists and compute node.
- dservice_ref = db.service_get_by_compute_host(context, dest)
+ try:
+ dservice_ref = db.service_get_by_compute_host(context, dest)
+ except exception.NotFound:
+ raise exception.ComputeServiceUnavailable(host=dest)
# Checking dest host is alive.
if not self.servicegroup_api.service_is_up(dservice_ref):
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index b35c81837..c9118cb22 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -19,6 +19,8 @@ You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
+import random
+
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -30,6 +32,21 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+filter_scheduler_opts = [
+ cfg.IntOpt('scheduler_host_subset_size',
+ default=1,
+ help='New instances will be scheduled on a host chosen '
+ 'randomly from a subset of the N best hosts. This '
+ 'property defines the subset size that a host is '
+ 'chosen from. A value of 1 chooses the '
+ 'first host returned by the weighing functions. '
+ 'This value must be at least 1. Any value less than 1 '
+ 'will be ignored, and 1 will be used instead')
+]
+
+CONF.register_opts(filter_scheduler_opts)
+
+
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
@@ -133,8 +150,17 @@ class FilterScheduler(driver.Scheduler):
'scheduler.run_instance.scheduled', notifier.INFO,
payload)
+ # Update the metadata if necessary
+ scheduler_hints = filter_properties.get('scheduler_hints') or {}
+ group = scheduler_hints.get('group', None)
+ values = None
+ if group:
+ values = request_spec['instance_properties']['system_metadata']
+ values.update({'group': group})
+ values = {'system_metadata': values}
+
updated_instance = driver.instance_update_db(context,
- instance_uuid)
+ instance_uuid, extra_values=values)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
@@ -248,6 +274,18 @@ class FilterScheduler(driver.Scheduler):
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
+ # Get the group
+ update_group_hosts = False
+ scheduler_hints = filter_properties.get('scheduler_hints') or {}
+ group = scheduler_hints.get('group', None)
+ if group:
+ group_hosts = self.group_hosts(elevated, group)
+ update_group_hosts = True
+ if 'group_hosts' not in filter_properties:
+ filter_properties.update({'group_hosts': []})
+ configured_hosts = filter_properties['group_hosts']
+ filter_properties['group_hosts'] = configured_hosts + group_hosts
+
config_options = self._get_configuration_options()
# check retry policy. Rather ugly use of instance_uuids[0]...
@@ -293,12 +331,23 @@ class FilterScheduler(driver.Scheduler):
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
- best_host = weighed_hosts[0]
- LOG.debug(_("Choosing host %(best_host)s") % locals())
- selected_hosts.append(best_host)
+
+ scheduler_host_subset_size = CONF.scheduler_host_subset_size
+ if scheduler_host_subset_size > len(weighed_hosts):
+ scheduler_host_subset_size = len(weighed_hosts)
+ if scheduler_host_subset_size < 1:
+ scheduler_host_subset_size = 1
+
+ chosen_host = random.choice(
+ weighed_hosts[0:scheduler_host_subset_size])
+ LOG.debug(_("Choosing host %(chosen_host)s") % locals())
+ selected_hosts.append(chosen_host)
+
# Now consume the resources so the filter/weights
# will change for the next instance.
- best_host.obj.consume_from_instance(instance_properties)
+ chosen_host.obj.consume_from_instance(instance_properties)
+ if update_group_hosts is True:
+ filter_properties['group_hosts'].append(chosen_host.obj.host)
return selected_hosts
def _assert_compute_node_has_enough_memory(self, context,
diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
index 7e51a15f2..a7e894320 100644
--- a/nova/scheduler/filters/affinity_filter.py
+++ b/nova/scheduler/filters/affinity_filter.py
@@ -18,8 +18,11 @@
import netaddr
from nova.compute import api as compute
+from nova.openstack.common import log as logging
from nova.scheduler import filters
+LOG = logging.getLogger(__name__)
+
class AffinityFilter(filters.BaseHostFilter):
def __init__(self):
@@ -80,3 +83,20 @@ class SimpleCIDRAffinityFilter(AffinityFilter):
# We don't have an affinity host address.
return True
+
+
+class GroupAntiAffinityFilter(AffinityFilter):
+ """Schedule the instance on a different host from a set of group
+ instances.
+ """
+
+ def host_passes(self, host_state, filter_properties):
+ group_hosts = filter_properties.get('group_hosts') or []
+ LOG.debug(_("Group affinity: %(host)s in %(configured)s"),
+ {'host': host_state.host,
+ 'configured': group_hosts})
+ if group_hosts:
+ return not host_state.host in group_hosts
+
+ # No groups configured
+ return True
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 8d17349fa..7203fb735 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -369,6 +369,7 @@ class HostManager(object):
# Get resource usage across the available compute nodes:
compute_nodes = db.compute_node_get_all(context)
+ seen_nodes = set()
for compute in compute_nodes:
service = compute['service']
if not service:
@@ -388,5 +389,14 @@ class HostManager(object):
service=dict(service.iteritems()))
self.host_state_map[state_key] = host_state
host_state.update_from_compute_node(compute)
+ seen_nodes.add(state_key)
+
+ # remove compute nodes from host_state_map if they are not active
+ dead_nodes = set(self.host_state_map.keys()) - seen_nodes
+ for state_key in dead_nodes:
+ host, node = state_key
+ LOG.info(_("Removing dead compute node %(host)s:%(node)s "
+ "from scheduler") % locals())
+ del self.host_state_map[state_key]
return self.host_state_map.itervalues()
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index a129a1b6d..64a388f60 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -24,6 +24,7 @@ Scheduler Service
import sys
from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
@@ -92,6 +93,16 @@ class SchedulerManager(manager.Manager):
return self.driver.schedule_live_migration(
context, instance, dest,
block_migration, disk_over_commit)
+ except exception.ComputeServiceUnavailable as ex:
+ request_spec = {'instance_properties': {
+ 'uuid': instance['uuid'], },
+ }
+ with excutils.save_and_reraise_exception():
+ self._set_vm_state_and_notify('live_migration',
+ dict(vm_state=instance['vm_state'],
+ task_state=None,
+ expected_task_state=task_states.MIGRATING,),
+ context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify('live_migration',
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index 0fb30cdf5..6dc1aa6d1 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -19,7 +19,6 @@
from nova.openstack.common import cfg
from nova.openstack.common import importutils
-from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
@@ -40,10 +39,10 @@ class API(object):
_driver = None
_driver_name_class_mapping = {
- 'db': 'nova.servicegroup.drivers.db.DbDriver'
+ 'db': 'nova.servicegroup.drivers.db.DbDriver',
+ 'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver'
}
- @lockutils.synchronized('nova.servicegroup.api.new', 'nova-')
def __new__(cls, *args, **kwargs):
'''Create an instance of the servicegroup API.
diff --git a/nova/servicegroup/drivers/zk.py b/nova/servicegroup/drivers/zk.py
new file mode 100644
index 000000000..c4e3f7b71
--- /dev/null
+++ b/nova/servicegroup/drivers/zk.py
@@ -0,0 +1,157 @@
+# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
+#
+# Copyright (c) IBM 2012 Pavel Kravchenco <kpavel at il dot ibm dot com>
+# Alexey Roytman <roytman at il dot ibm dot com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import eventlet
+import evzookeeper
+from evzookeeper import membership
+import zookeeper
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.servicegroup import api
+from nova import utils
+
+
+zk_driver_opts = [
+ cfg.StrOpt('address',
+ default=None,
+ help='The ZooKeeper addresses for servicegroup service in the '
+ 'format of host1:port,host2:port,host3:port'),
+ cfg.IntOpt('recv_timeout',
+ default=4000,
+ help='recv_timeout parameter for the zk session'),
+ cfg.StrOpt('sg_prefix',
+ default="/servicegroups",
+ help='The prefix used in ZooKeeper to store ephemeral nodes'),
+ cfg.IntOpt('sg_retry_interval',
+ default=5,
+ help='Number of seconds to wait until retrying to join the '
+ 'session'),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(zk_driver_opts, group="zk")
+
+LOG = logging.getLogger(__name__)
+
+
+class ZooKeeperDriver(api.ServiceGroupDriver):
+ """ZooKeeper driver for the service group API."""
+
+ def __init__(self, *args, **kwargs):
+ """Create the zk session object."""
+ null = open(os.devnull, "w")
+ self._session = evzookeeper.ZKSession(CONF.zk.address,
+ recv_timeout=
+ CONF.zk.recv_timeout,
+ zklog_fd=null)
+ self._memberships = {}
+ self._monitors = {}
+ # Make sure the prefix exists
+ try:
+ self._session.create(CONF.zk.sg_prefix, "",
+ acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE])
+ except zookeeper.NodeExistsException:
+ pass
+
+ super(ZooKeeperDriver, self).__init__()
+
+ def join(self, member_id, group, service=None):
+ """Join the given service with its group."""
+ LOG.debug(_('ZooKeeperDriver: join new member %(id)s to the '
+ '%(gr)s group, service=%(sr)s'),
+ {'id': member_id, 'gr': group, 'sr': service})
+ member = self._memberships.get((group, member_id), None)
+ if member is None:
+ # the first time to join. Generate a new object
+ path = "%s/%s" % (CONF.zk.sg_prefix, group)
+ try:
+ member = membership.Membership(self._session, path, member_id)
+ except RuntimeError:
+ LOG.exception(_("Unable to join. It is possible that either "
+ "another node exists with the same name, or "
+ "this node just restarted. We will try "
+ "again in a short while to make sure."))
+ eventlet.sleep(CONF.zk.sg_retry_interval)
+ member = membership.Membership(self._session, path, member_id)
+ self._memberships[(group, member_id)] = member
+ return FakeLoopingCall(self, member_id, group)
+
+ def leave(self, member_id, group):
+ """Remove the given member from the service group."""
+ LOG.debug(_('ZooKeeperDriver.leave: %(member)s from group %(group)s'),
+ {'member': member_id, 'group': group})
+ try:
+ key = (group, member_id)
+ member = self._memberships[key]
+ member.leave()
+ del self._memberships[key]
+ except KeyError:
+ LOG.error(_('ZooKeeperDriver.leave: %(id)s has not joined to the '
+ '%(gr)s group'), {'id': member_id, 'gr': group})
+
+ def is_up(self, service_ref):
+ group_id = service_ref['topic']
+ member_id = service_ref['host']
+ all_members = self.get_all(group_id)
+ return member_id in all_members
+
+ def get_all(self, group_id):
+ """Return all members in a list, or a ServiceGroupUnavailable
+ exception.
+ """
+ monitor = self._monitors.get(group_id, None)
+ if monitor is None:
+ path = "%s/%s" % (CONF.zk.sg_prefix, group_id)
+ monitor = membership.MembershipMonitor(self._session, path)
+ self._monitors[group_id] = monitor
+ # Note(maoy): When initialized for the first time, it takes a
+ # while to retrieve all members from zookeeper. To prevent
+ # None to be returned, we sleep 5 sec max to wait for data to
+ # be ready.
+ for _retry in range(50):
+ eventlet.sleep(0.1)
+ all_members = monitor.get_all()
+ if all_members is not None:
+ return all_members
+ all_members = monitor.get_all()
+ if all_members is None:
+ raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver")
+ return all_members
+
+
+class FakeLoopingCall(utils.LoopingCallBase):
+ """The fake Looping Call implementation, created for backward
+ compatibility with a membership based on DB.
+ """
+ def __init__(self, driver, host, group):
+ self._driver = driver
+ self._group = group
+ self._host = host
+
+ def stop(self):
+ self._driver.leave(self._host, self._group)
+
+ def start(self, interval, initial_delay=None):
+ pass
+
+ def wait(self):
+ pass
diff --git a/nova/testing/__init__.py b/nova/testing/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/nova/testing/__init__.py
+++ /dev/null
diff --git a/nova/testing/README.rst b/nova/tests/README.rst
index 4c341b7ed..76b92258a 100644
--- a/nova/testing/README.rst
+++ b/nova/tests/README.rst
@@ -2,16 +2,8 @@
OpenStack Nova Testing Infrastructure
=====================================
-A note of clarification is in order, to help those who are new to testing in
-OpenStack nova:
-
-- actual unit tests are created in the "tests" directory;
-- the "testing" directory is used to house the infrastructure needed to support
- testing in OpenStack Nova.
-
This README file attempts to provide current and prospective contributors with
-everything they need to know in order to start creating unit tests and
-utilizing the convenience code provided in nova.testing.
+everything they need to know in order to start creating unit tests for nova.
Note: the content for the rest of this file will be added as the work items in
the following blueprint are completed:
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index dfb687cf4..76351e489 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -64,13 +64,6 @@ def fake_compute_api_get(self, context, instance_id):
'task_state': None}
-def fake_scheduler_api_live_migration(self, context, dest,
- block_migration=False,
- disk_over_commit=False, instance=None,
- instance_id=None, topic=None):
- return None
-
-
class AdminActionsTest(test.TestCase):
_actions = ('pause', 'unpause', 'suspend', 'resume', 'migrate',
@@ -93,9 +86,6 @@ class AdminActionsTest(test.TestCase):
self.UUID = uuid.uuid4()
for _method in self._methods:
self.stubs.Set(compute_api.API, _method, fake_compute_api)
- self.stubs.Set(scheduler_rpcapi.SchedulerAPI,
- 'live_migration',
- fake_scheduler_api_live_migration)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
@@ -150,7 +140,16 @@ class AdminActionsTest(test.TestCase):
task_state, expected_task_state):
return None
+ def fake_scheduler_api_live_migration(self, context, dest,
+ block_migration=False,
+ disk_over_commit=False, instance=None,
+ instance_id=None, topic=None):
+ return None
+
self.stubs.Set(compute_api.API, 'update', fake_update)
+ self.stubs.Set(scheduler_rpcapi.SchedulerAPI,
+ 'live_migration',
+ fake_scheduler_api_live_migration)
res = req.get_response(app)
self.assertEqual(res.status_int, 202)
@@ -174,6 +173,44 @@ class AdminActionsTest(test.TestCase):
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
+ def test_migrate_live_compute_service_unavailable(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app(fake_auth_context=ctxt, init_only=('servers',))
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'os-migrateLive': {
+ 'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False,
+ }
+ })
+ req.content_type = 'application/json'
+
+ def fake_update(inst, context, instance,
+ task_state, expected_task_state):
+ return None
+
+ def fake_scheduler_api_live_migration(context, dest,
+ block_migration=False,
+ disk_over_commit=False, instance=None,
+ instance_id=None, topic=None):
+ raise exception.ComputeServiceUnavailable(host='host')
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+ self.stubs.Set(scheduler_rpcapi.SchedulerAPI,
+ 'live_migration',
+ fake_scheduler_api_live_migration)
+
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 400)
+ self.assertIn(
+ unicode(exception.ComputeServiceUnavailable(host='host')),
+ res.body)
+
class CreateBackupTests(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/contrib/test_console_output.py b/nova/tests/api/openstack/compute/contrib/test_console_output.py
index d3f80b655..14b61abb7 100644
--- a/nova/tests/api/openstack/compute/contrib/test_console_output.py
+++ b/nova/tests/api/openstack/compute/contrib/test_console_output.py
@@ -35,6 +35,10 @@ def fake_get_console_output(self, _context, _instance, tail_length):
return '\n'.join(fixture)
+def fake_get_console_output_not_ready(self, _context, _instance, tail_length):
+ raise exception.InstanceNotReady(instance_id=_instance["uuid"])
+
+
def fake_get(self, context, instance_uuid):
return {'uuid': instance_uuid}
@@ -133,3 +137,15 @@ class ConsoleOutputExtensionTest(test.TestCase):
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
+
+ def test_get_console_output_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fake_get_console_output_not_ready)
+ body = {'os-getConsoleOutput': {'length': 3}}
+ req = webob.Request.blank('/v2/fake/servers/1/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 409)
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
index 8ebd810ac..d847e54f9 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
@@ -17,6 +17,7 @@ from lxml import etree
import webob
from nova.api.openstack.compute.contrib import extended_availability_zone
+from nova import availability_zones
from nova import compute
from nova import exception
from nova.openstack.common import jsonutils
@@ -29,19 +30,20 @@ UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, host="host-fake")
- inst['availability_zone'] = 'az-i'
+ inst = fakes.stub_instance(1, uuid=UUID3, host="get-host")
return inst
def fake_compute_get_all(*args, **kwargs):
- inst1 = fakes.stub_instance(1, uuid=UUID1, host="host-1")
- inst2 = fakes.stub_instance(2, uuid=UUID2, host="host-2")
- inst1['availability_zone'] = 'az-i'
- inst2['availability_zone'] = 'az-i'
+ inst1 = fakes.stub_instance(1, uuid=UUID1, host="all-host")
+ inst2 = fakes.stub_instance(2, uuid=UUID2, host="all-host")
return [inst1, inst2]
+def fake_get_host_availability_zone(context, host):
+ return host
+
+
class ExtendedServerAttributesTest(test.TestCase):
content_type = 'application/json'
prefix = 'OS-EXT-AZ:'
@@ -51,6 +53,8 @@ class ExtendedServerAttributesTest(test.TestCase):
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_host_availability_zone)
self.flags(
osapi_compute_extension=[
@@ -69,20 +73,16 @@ class ExtendedServerAttributesTest(test.TestCase):
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
- def assertServerAttributes(self, server, az_instance, az_host):
+ def assertServerAttributes(self, server, az):
self.assertEqual(server.get('%savailability_zone' % self.prefix),
- az_instance)
- self.assertEqual(server.get('%shost_availability_zone' % self.prefix),
- az_host)
+ az)
def test_show(self):
url = '/v2/fake/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
- self.assertServerAttributes(self._get_server(res.body),
- az_instance='az-i',
- az_host='nova')
+ self.assertServerAttributes(self._get_server(res.body), 'get-host')
def test_detail(self):
url = '/v2/fake/servers/detail'
@@ -90,9 +90,7 @@ class ExtendedServerAttributesTest(test.TestCase):
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
- self.assertServerAttributes(server,
- az_instance='az-i',
- az_host='nova')
+ self.assertServerAttributes(server, 'all-host')
def test_no_instance_passthrough_404(self):
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_ips.py b/nova/tests/api/openstack/compute/contrib/test_extended_ips.py
new file mode 100644
index 000000000..9fd646b91
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_ips.py
@@ -0,0 +1,168 @@
+# Copyright 2013 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import extended_ips
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+NW_CACHE = [
+ {
+ 'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [
+ {
+ 'cidr': '192.168.1.0/24',
+ 'ips': [
+ {
+ 'address': '192.168.1.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.1', 'type': 'floating'},
+ ],
+ },
+ ],
+ },
+ ]
+ }
+ },
+ {
+ 'address': 'bb:bb:bb:bb:bb:bb',
+ 'id': 2,
+ 'network': {
+ 'bridge': 'br1',
+ 'id': 2,
+ 'label': 'public',
+ 'subnets': [
+ {
+ 'cidr': '10.0.0.0/24',
+ 'ips': [
+ {
+ 'address': '10.0.0.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.2', 'type': 'floating'},
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ }
+]
+ALL_IPS = []
+for cache in NW_CACHE:
+ for subnet in cache['network']['subnets']:
+ for fixed in subnet['ips']:
+ sanitized = dict(fixed)
+ sanitized.pop('floating_ips')
+ ALL_IPS.append(sanitized)
+ for floating in fixed['floating_ips']:
+ ALL_IPS.append(floating)
+ALL_IPS.sort()
+
+
+def fake_compute_get(*args, **kwargs):
+ return fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ return [
+ fakes.stub_instance(1, uuid=UUID1, nw_cache=NW_CACHE),
+ fakes.stub_instance(2, uuid=UUID2, nw_cache=NW_CACHE),
+ ]
+
+
+class ExtendedIpsTest(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-IPS:'
+
+ def setUp(self):
+ super(ExtendedIpsTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_ips'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def _get_ips(self, server):
+ for network in server['addresses'].itervalues():
+ for ip in network:
+ yield ip
+
+ def assertServerStates(self, server):
+ results = []
+ for ip in self._get_ips(server):
+ results.append({'address': ip.get('addr'),
+ 'type': ip.get('%stype' % self.prefix)})
+
+ self.assertEqual(ALL_IPS, sorted(results))
+
+ def test_show(self):
+ url = '/v2/fake/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerStates(self._get_server(res.body))
+
+ def test_detail(self):
+ url = '/v2/fake/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ self.assertServerStates(server)
+
+
+class ExtendedIpsXmlTest(ExtendedIpsTest):
+ content_type = 'application/xml'
+ prefix = '{%s}' % extended_ips.Extended_ips.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
+
+ def _get_ips(self, server):
+ for network in server.find('{%s}addresses' % xmlutil.XMLNS_V11):
+ for ip in network:
+ yield ip
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
new file mode 100644
index 000000000..b4db5daba
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
@@ -0,0 +1,231 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.contrib import instance_actions
+from nova import db
+from nova import exception
+from nova.openstack.common import policy
+from nova import test
+from nova.tests.api.openstack import fakes
+from nova.tests import fake_instance_actions
+
+FAKE_UUID = fake_instance_actions.FAKE_UUID
+FAKE_REQUEST_ID = fake_instance_actions.FAKE_REQUEST_ID1
+
+
+def format_action(action):
+ '''Remove keys that aren't serialized.'''
+ if 'id' in action:
+ del(action['id'])
+ if 'finish_time' in action:
+ del(action['finish_time'])
+ return action
+
+
+def format_event(event):
+ '''Remove keys that aren't serialized.'''
+ if 'id' in event:
+ del(event['id'])
+ return event
+
+
+class InstanceActionsPolicyTest(test.TestCase):
+ def setUp(self):
+ super(InstanceActionsPolicyTest, self).setUp()
+ self.controller = instance_actions.InstanceActionsController()
+
+ def test_list_actions_restricted_by_project(self):
+ rules = policy.Rules({'compute:get': policy.parse_rule(''),
+ 'compute_extension:instance_actions':
+ policy.parse_rule('project_id:%(project_id)s')})
+ policy.set_rules(rules)
+
+ def fake_instance_get_by_uuid(context, instance_id):
+ return {'name': 'fake', 'project_id': '%s_unequal' %
+ context.project_id}
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
+ self.assertRaises(exception.NotAuthorized, self.controller.index, req,
+ str(uuid.uuid4()))
+
+ def test_get_action_restricted_by_project(self):
+ rules = policy.Rules({'compute:get': policy.parse_rule(''),
+ 'compute_extension:instance_actions':
+ policy.parse_rule('project_id:%(project_id)s')})
+ policy.set_rules(rules)
+
+ def fake_instance_get_by_uuid(context, instance_id):
+ return {'name': 'fake', 'project_id': '%s_unequal' %
+ context.project_id}
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/servers/12/os-instance-actions/1')
+ self.assertRaises(exception.NotAuthorized, self.controller.show, req,
+ str(uuid.uuid4()), '1')
+
+
+class InstanceActionsTest(test.TestCase):
+ def setUp(self):
+ super(InstanceActionsTest, self).setUp()
+ self.controller = instance_actions.InstanceActionsController()
+ self.fake_actions = copy.deepcopy(fake_instance_actions.FAKE_ACTIONS)
+ self.fake_events = copy.deepcopy(fake_instance_actions.FAKE_EVENTS)
+
+ def fake_instance_get_by_uuid(context, instance_id):
+ return {'name': 'fake', 'project_id': context.project_id}
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+
+ def test_list_actions(self):
+ def fake_get_actions(context, uuid):
+ return self.fake_actions[uuid].values()
+
+ self.stubs.Set(db, 'actions_get', fake_get_actions)
+ req = fakes.HTTPRequest.blank('/v2/123/servers/12/os-instance-actions')
+ res_dict = self.controller.index(req, FAKE_UUID)
+ for res in res_dict['instanceActions']:
+ fake_action = self.fake_actions[FAKE_UUID][res['request_id']]
+ fake_action = format_action(fake_action)
+ self.assertEqual(fake_action, res)
+
+ def test_get_action_with_events_allowed(self):
+ def fake_get_action(context, uuid, request_id):
+ return self.fake_actions[uuid][request_id]
+
+ def fake_get_events(context, action_id):
+ return self.fake_events[action_id]
+
+ self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
+ self.stubs.Set(db, 'action_events_get', fake_get_events)
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/servers/12/os-instance-actions/1',
+ use_admin_context=True)
+ res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
+ fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+ fake_events = self.fake_events[fake_action['id']]
+ fake_events = [format_event(event) for event in fake_events]
+ fake_action = format_action(fake_action)
+ fake_action['events'] = fake_events
+ self.assertEqual(fake_action, res_dict['instanceAction'])
+
+ def test_get_action_with_events_not_allowed(self):
+ def fake_get_action(context, uuid, request_id):
+ return self.fake_actions[uuid][request_id]
+
+ def fake_get_events(context, action_id):
+ return self.fake_events[action_id]
+
+ self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
+ self.stubs.Set(db, 'action_events_get', fake_get_events)
+ rules = policy.Rules({'compute:get': policy.parse_rule(''),
+ 'compute_extension:instance_actions':
+ policy.parse_rule(''),
+ 'compute_extension:instance_actions:events':
+ policy.parse_rule('is_admin:True')})
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/servers/12/os-instance-actions/1')
+ res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
+ fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+ fake_action = format_action(fake_action)
+ self.assertEqual(fake_action, res_dict['instanceAction'])
+
+ def test_action_not_found(self):
+ def fake_no_action(context, uuid, action_id):
+ return None
+
+ self.stubs.Set(db, 'action_get_by_request_id', fake_no_action)
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/servers/12/os-instance-actions/1')
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
+ FAKE_UUID, FAKE_REQUEST_ID)
+
+
+class InstanceActionsSerializerTest(test.TestCase):
+ def setUp(self):
+ super(InstanceActionsSerializerTest, self).setUp()
+ self.fake_actions = copy.deepcopy(fake_instance_actions.FAKE_ACTIONS)
+ self.fake_events = copy.deepcopy(fake_instance_actions.FAKE_EVENTS)
+
+ def _verify_instance_action_attachment(self, attach, tree):
+ for key in attach.keys():
+ if key != 'events':
+ self.assertEqual(attach[key], tree.get(key),
+ '%s did not match' % key)
+
+ def _verify_instance_action_event_attachment(self, attach, tree):
+ for key in attach.keys():
+ self.assertEqual(attach[key], tree.get(key),
+ '%s did not match' % key)
+
+ def test_instance_action_serializer(self):
+ serializer = instance_actions.InstanceActionTemplate()
+ action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+ text = serializer.serialize({'instanceAction': action})
+ tree = etree.fromstring(text)
+
+ action = format_action(action)
+ self.assertEqual('instanceAction', tree.tag)
+ self._verify_instance_action_attachment(action, tree)
+ found_events = False
+ for child in tree:
+ if child.tag == 'events':
+ found_events = True
+ self.assertFalse(found_events)
+
+ def test_instance_action_events_serializer(self):
+ serializer = instance_actions.InstanceActionTemplate()
+ action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
+ event = self.fake_events[action['id']][0]
+ action['events'] = [event, event]
+ text = serializer.serialize({'instanceAction': action})
+ tree = etree.fromstring(text)
+
+ action = format_action(action)
+ self.assertEqual('instanceAction', tree.tag)
+ self._verify_instance_action_attachment(action, tree)
+
+ event = format_event(event)
+ found_events = False
+ for child in tree:
+ if child.tag == 'events':
+ found_events = True
+ for key in event:
+ self.assertEqual(event[key], child.get(key))
+ self.assertTrue(found_events)
+
+ def test_instance_actions_serializer(self):
+ serializer = instance_actions.InstanceActionsTemplate()
+ action_list = self.fake_actions[FAKE_UUID].values()
+ text = serializer.serialize({'instanceActions': action_list})
+ tree = etree.fromstring(text)
+
+ action_list = [format_action(action) for action in action_list]
+ self.assertEqual('instanceActions', tree.tag)
+ self.assertEqual(len(action_list), len(tree))
+ for idx, child in enumerate(tree):
+ self.assertEqual('instanceAction', child.tag)
+ request_id = child.get('request_id')
+ self._verify_instance_action_attachment(
+ self.fake_actions[FAKE_UUID][request_id],
+ child)
diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py
index 8d518b815..6636824fd 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quotas.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py
@@ -143,6 +143,45 @@ class QuotaSetsTest(test.TestCase):
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body)
+ def test_quotas_update_invalid_value(self):
+ expected_resp = {'quota_set': {
+ 'instances': 50, 'cores': 50,
+ 'ram': 51200, 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100}}
+
+ # when PUT JSON format with empty string for quota
+ body = {'quota_set': {'instances': 50, 'cores': 50,
+ 'ram': '', 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100}}
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 'update_me', body)
+ self.assertEqual(res_dict, expected_resp)
+
+ # when PUT XML format with empty string for quota
+ body = {'quota_set': {'instances': 50, 'cores': 50,
+ 'ram': {}, 'floating_ips': 10,
+ 'metadata_items': 128, 'injected_files': 5,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ 'key_pairs': 100}}
+ req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 'update_me', body)
+ self.assertEqual(res_dict, expected_resp)
+
class QuotaXMLSerializerTest(test.TestCase):
def setUp(self):
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index 231923e6d..2823c3e8f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -448,6 +448,9 @@ class TestSecurityGroups(test.TestCase):
req, '1')
def test_associate_by_non_existing_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.assertEquals(return_server(None, '1'),
+ nova.db.instance_get(None, '1'))
body = dict(addSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
@@ -537,6 +540,9 @@ class TestSecurityGroups(test.TestCase):
self.manager._addSecurityGroup(req, '1', body)
def test_disassociate_by_non_existing_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.assertEquals(return_server(None, '1'),
+ nova.db.instance_get(None, '1'))
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
@@ -1011,6 +1017,38 @@ class TestSecurityGroupRules(test.TestCase):
self.controller.create,
req, {'security_group_rule': rule})
+ def test_create_rule_cidr_allow_all(self):
+ rule = security_group_rule_template(cidr='0.0.0.0/0')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEquals(security_group_rule['id'], 0)
+ self.assertEquals(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEquals(security_group_rule['ip_range']['cidr'],
+ "0.0.0.0/0")
+
+ def test_create_rule_cidr_allow_some(self):
+ rule = security_group_rule_template(cidr='15.0.0.0/8')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEquals(security_group_rule['id'], 0)
+ self.assertEquals(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEquals(security_group_rule['ip_range']['cidr'],
+ "15.0.0.0/8")
+
+ def test_create_rule_cidr_bad_netmask(self):
+ rule = security_group_rule_template(cidr='15.0.0.0/0')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
index 554379e74..eb708a574 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
@@ -17,6 +17,7 @@ import webob
from nova.api.openstack.compute.contrib import server_start_stop
from nova.compute import api as compute_api
+from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
@@ -25,6 +26,10 @@ def fake_compute_api_get(self, context, instance_id):
return {'id': 1, 'uuid': instance_id}
+def fake_start_stop_not_ready(self, context, instance):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
class ServerStartStopTest(test.TestCase):
def setUp(self):
@@ -41,6 +46,14 @@ class ServerStartStopTest(test.TestCase):
body = dict(start="")
self.controller._start_server(req, 'test_inst', body)
+ def test_start_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, 'test_inst', body)
+
def test_stop(self):
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
self.mox.StubOutWithMock(compute_api.API, 'stop')
@@ -51,6 +64,14 @@ class ServerStartStopTest(test.TestCase):
body = dict(stop="")
self.controller._stop_server(req, 'test_inst', body)
+ def test_stop_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, 'test_inst', body)
+
def test_start_with_bogus_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index a52b0e0fc..9c45edc08 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -167,6 +167,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"DeferredDelete",
"DiskConfig",
"ExtendedAvailabilityZone",
+ "ExtendedIps",
"Evacuate",
"ExtendedStatus",
"ExtendedServerAttributes",
@@ -184,6 +185,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"FloatingIpsBulk",
"Fox In Socks",
"Hosts",
+ "InstanceActions",
"Keypairs",
"Multinic",
"MultipleCreate",
diff --git a/nova/tests/baremetal/test_virtual_power_driver.py b/nova/tests/baremetal/test_virtual_power_driver.py
index a63d8add1..689677698 100644
--- a/nova/tests/baremetal/test_virtual_power_driver.py
+++ b/nova/tests/baremetal/test_virtual_power_driver.py
@@ -101,7 +101,7 @@ class BareMetalVPDTestCase(bm_db_base.BMDBTestCase):
return self.pm
-class VPDClassMethodsTestCase(BareMetalVPDTestCase):
+class VPDMissingOptionsTestCase(BareMetalVPDTestCase):
def test_get_conn_missing_options(self):
self.flags(virtual_power_ssh_host=None, group="baremetal")
@@ -121,10 +121,16 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.assertRaises(exception.NovaException,
self.pm._get_conn)
- def test_get_conn_success(self):
+
+class VPDClassMethodsTestCase(BareMetalVPDTestCase):
+
+ def setUp(self):
+ super(VPDClassMethodsTestCase, self).setUp()
self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
self.flags(virtual_power_host_user='user', group="baremetal")
self.flags(virtual_power_host_pass='password', group="baremetal")
+
+ def test_get_conn_success(self):
self._create_node()
self._create_pm()
self._conn = self.pm._get_conn()
@@ -138,9 +144,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_get_full_node_list(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -154,9 +157,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_check_for_node(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -175,9 +175,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_check_for_node_not_found(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -194,9 +191,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_activate_node(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -212,9 +206,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_activate_node_fail(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -230,9 +221,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_deactivate_node(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -249,9 +237,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_deactivate_node_fail(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -268,9 +253,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_reboot_node(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -286,9 +268,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_reboot_node_fail(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -304,9 +283,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_is_power_on(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -322,9 +298,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_is_power_on_fail(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -340,9 +313,6 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
self.mox.VerifyAll()
def test_run_command(self):
- self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
- self.flags(virtual_power_host_user='user', group="baremetal")
- self.flags(virtual_power_host_pass='password', group="baremetal")
self._create_node()
self._create_pm()
@@ -356,3 +326,42 @@ class VPDClassMethodsTestCase(BareMetalVPDTestCase):
result = self.pm._run_command("test return")
self.assertEqual(result, ['test', 'return'])
self.mox.VerifyAll()
+
+ def test_run_command_raises_exception(self):
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_set_connection')
+ self.mox.StubOutWithMock(nutils, 'ssh_execute')
+
+ self.pm._set_connection().AndReturn(True)
+ nutils.ssh_execute(None, '/usr/bin/VBoxManage test return',
+ check_exit_code=True).\
+ AndRaise(exception.ProcessExecutionError)
+ self.mox.ReplayAll()
+
+ result = self.pm._run_command("test return")
+ self.assertEqual(result, [])
+ self.mox.VerifyAll()
+
+ def test_activate_node_with_exception(self):
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(nutils, 'ssh_execute')
+
+ self.pm._check_for_node().AndReturn(["testNode"])
+ self.pm._check_for_node().AndReturn(["testNode"])
+ nutils.ssh_execute('test', '/usr/bin/VBoxManage startvm ',
+ check_exit_code=True).\
+ AndRaise(exception.ProcessExecutionError)
+ nutils.ssh_execute('test', '/usr/bin/VBoxManage list runningvms',
+ check_exit_code=True).\
+ AndRaise(exception.ProcessExecutionError)
+
+ self.mox.ReplayAll()
+ self.pm._connection = 'test'
+ state = self.pm.activate_node()
+ self.assertEqual(state, 'error')
+ self.mox.VerifyAll()
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index a75ba00cc..5afaa8e3c 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -2640,10 +2640,11 @@ class ComputeTestCase(BaseTestCase):
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_start')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'network_migrate_instance_start')
migration = {'source_compute': srchost, 'dest_compute': dest, }
- self.compute.network_api.migrate_instance_start(c, inst_ref, migration)
+ self.compute.conductor_api.network_migrate_instance_start(c, inst_ref,
+ migration)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, rpc.queue_get_for(c, CONF.compute_topic, dest),
{"method": "post_live_migration_at_destination",
@@ -2684,11 +2685,12 @@ class ComputeTestCase(BaseTestCase):
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_start')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'network_migrate_instance_start')
migration = {'source_compute': srchost,
'dest_compute': dest, }
- self.compute.network_api.migrate_instance_start(c, inst_ref, migration)
+ self.compute.conductor_api.network_migrate_instance_start(c, inst_ref,
+ migration)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, rpc.queue_get_for(c, CONF.compute_topic, dest),
{"method": "post_live_migration_at_destination",
@@ -2710,8 +2712,8 @@ class ComputeTestCase(BaseTestCase):
def test_post_live_migration_at_destination(self):
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'network_migrate_instance_finish')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute, '_instance_update')
@@ -2726,8 +2728,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.host)
migration = {'source_compute': instance['host'],
'dest_compute': self.compute.host, }
- self.compute.network_api.migrate_instance_finish(admin_ctxt,
- instance, migration)
+ self.compute.conductor_api.network_migrate_instance_finish(admin_ctxt,
+ instance, migration)
fake_net_info = []
fake_block_dev_info = {'foo': 'bar'}
self.compute.driver.post_live_migration_at_destination(admin_ctxt,
@@ -3804,6 +3806,15 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_start_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.start,
+ self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_stop(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
@@ -3819,6 +3830,15 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_stop_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.stop,
+ self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_start_shutdown(self):
def check_state(instance_uuid, power_state_, vm_state_, task_state_):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
@@ -5657,6 +5677,15 @@ class ComputeAPITestCase(BaseTestCase):
fake_instance, tail_length=fake_tail_length)
self.assertEqual(output, fake_console_output)
+ def test_console_output_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_console_output,
+ self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_attach_volume(self):
# Ensure instance can be soft rebooted.
@@ -6161,6 +6190,16 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.stubs.Set(rpc, 'call', fake_rpc_method)
self.stubs.Set(rpc, 'cast', fake_rpc_method)
+ def test_aggregate_no_zone(self):
+ # Ensure we can create an aggregate without an availability zone
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ None)
+ self.api.delete_aggregate(self.context, aggr['id'])
+ db.aggregate_get(self.context.elevated(read_deleted='yes'),
+ aggr['id'])
+ self.assertRaises(exception.AggregateNotFound,
+ self.api.delete_aggregate, self.context, aggr['id'])
+
def test_update_aggregate_metadata(self):
# Ensure metadata can be updated.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index bd73328e8..ed733599b 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -16,6 +16,7 @@
import mox
+from nova.api.ec2 import ec2utils
from nova.compute import instance_types
from nova.compute import utils as compute_utils
from nova.compute import vm_states
@@ -31,6 +32,7 @@ from nova import notifications
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
+from nova import quota
from nova import test
@@ -240,6 +242,15 @@ class _BaseTestCase(object):
aggregate,
'fake')
+ def test_aggregate_metadata_get_by_host(self):
+ self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
+ db.aggregate_metadata_get_by_host(self.context, 'host',
+ 'key').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.aggregate_metadata_get_by_host(self.context,
+ 'host', 'key')
+ self.assertEqual(result, 'result')
+
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
@@ -503,6 +514,78 @@ class _BaseTestCase(object):
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
+ def test_network_migrate_instance_start(self):
+ self.mox.StubOutWithMock(self.conductor_manager.network_api,
+ 'migrate_instance_start')
+ self.conductor_manager.network_api.migrate_instance_start(self.context,
+ 'instance',
+ 'migration')
+ self.mox.ReplayAll()
+ self.conductor.network_migrate_instance_start(self.context,
+ 'instance',
+ 'migration')
+
+ def test_network_migrate_instance_finish(self):
+ self.mox.StubOutWithMock(self.conductor_manager.network_api,
+ 'migrate_instance_finish')
+ self.conductor_manager.network_api.migrate_instance_finish(
+ self.context, 'instance', 'migration')
+ self.mox.ReplayAll()
+ self.conductor.network_migrate_instance_finish(self.context,
+ 'instance',
+ 'migration')
+
+ def test_quota_commit(self):
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ quota.QUOTAS.commit(self.context, 'reservations')
+ self.mox.ReplayAll()
+ self.conductor.quota_commit(self.context, 'reservations')
+
+ def test_quota_rollback(self):
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+ quota.QUOTAS.rollback(self.context, 'reservations')
+ self.mox.ReplayAll()
+ self.conductor.quota_rollback(self.context, 'reservations')
+
+ def test_get_ec2_ids(self):
+ expected = {
+ 'instance-id': 'ec2-inst-id',
+ 'ami-id': 'ec2-ami-id',
+ 'kernel-id': 'ami-kernel-ec2-kernelid',
+ 'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
+ }
+ inst = {
+ 'uuid': 'fake-uuid',
+ 'kernel_id': 'ec2-kernelid',
+ 'ramdisk_id': 'ec2-ramdiskid',
+ 'image_ref': 'fake-image',
+ }
+ self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
+ self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
+ self.mox.StubOutWithMock(ec2utils, 'image_type')
+
+ ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
+ expected['instance-id'])
+ ec2utils.glance_id_to_ec2_id(self.context,
+ inst['image_ref']).AndReturn(
+ expected['ami-id'])
+ for image_type in ['kernel', 'ramdisk']:
+ image_id = inst['%s_id' % image_type]
+ ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
+ ec2utils.glance_id_to_ec2_id(self.context, image_id,
+ 'ami-' + image_type).AndReturn(
+ 'ami-%s-ec2-%sid' % (image_type, image_type))
+
+ self.mox.ReplayAll()
+ result = self.conductor.get_ec2_ids(self.context, inst)
+ self.assertEqual(result, expected)
+
+ def test_compute_stop(self):
+ self.mox.StubOutWithMock(self.conductor_manager.compute_api, 'stop')
+ self.conductor_manager.compute_api.stop(self.context, 'instance', True)
+ self.mox.ReplayAll()
+ self.conductor.compute_stop(self.context, 'instance')
+
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
diff --git a/nova/tests/fake_instance_actions.py b/nova/tests/fake_instance_actions.py
index 1667ac62d..f34d9b213 100644
--- a/nova/tests/fake_instance_actions.py
+++ b/nova/tests/fake_instance_actions.py
@@ -17,6 +17,64 @@
from nova import db
+FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
+FAKE_REQUEST_ID1 = 'req-3293a3f1-b44c-4609-b8d2-d81b105636b8'
+FAKE_REQUEST_ID2 = 'req-25517360-b757-47d3-be45-0e8d2a01b36a'
+FAKE_ACTION_ID1 = 'f811a359-0c98-4daa-87a4-2948d4c21b78'
+FAKE_ACTION_ID2 = '4e9594b5-4ac5-421c-ac60-2d802b11c798'
+
+FAKE_ACTIONS = {
+ FAKE_UUID: {
+ FAKE_REQUEST_ID1: {'id': FAKE_ACTION_ID1,
+ 'action': 'reboot',
+ 'instance_uuid': FAKE_UUID,
+ 'request_id': FAKE_REQUEST_ID1,
+ 'project_id': '147',
+ 'user_id': '789',
+ 'start_time': '2012-12-05 00:00:00.000000',
+ 'finish_time': '',
+ 'message': '',
+ },
+ FAKE_REQUEST_ID2: {'id': FAKE_ACTION_ID2,
+ 'action': 'resize',
+ 'instance_uuid': FAKE_UUID,
+ 'request_id': FAKE_REQUEST_ID2,
+ 'user_id': '789',
+ 'project_id': '842',
+ 'start_time': '2012-12-05 01:00:00.000000',
+ 'finish_time': '',
+ 'message': '',
+ }
+ }
+}
+
+FAKE_EVENTS = {
+ FAKE_ACTION_ID1: [{'id': '1',
+ 'event': 'schedule',
+ 'start_time': '2012-12-05 01:00:02.000000',
+ 'finish_time': '2012-12-05 01:02:00.000000',
+ 'result': 'Success',
+ 'traceback': '',
+ },
+ {'id': '2',
+ 'event': 'compute_create',
+ 'start_time': '2012-12-05 01:03:00.000000',
+ 'finish_time': '2012-12-05 01:04:00.000000',
+ 'result': 'Success',
+ 'traceback': '',
+ }
+ ],
+ FAKE_ACTION_ID2: [{'id': '3',
+ 'event': 'schedule',
+ 'start_time': '2012-12-05 03:00:00.000000',
+ 'finish_time': '2012-12-05 03:02:00.000000',
+ 'result': 'Error',
+ 'traceback': ''
+ }
+ ]
+}
+
+
def fake_action_event_start(*args):
pass
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index 6618c58ea..ae8f2b7b0 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -53,7 +53,7 @@ class FakeVIFDriver(object):
def setattr(self, key, val):
self.__setattr__(key, val)
- def get_config(self, instance, network, mapping):
+ def get_config(self, instance, network, mapping, image_meta):
conf = libvirt_config.LibvirtConfigGuestInterface()
for attr, val in conf.__dict__.iteritems():
@@ -376,7 +376,7 @@ def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
floating_ips_per_fixed_ip=0,
spectacular=False):
- def get_instance_nw_info(self, context, instance):
+ def get_instance_nw_info(self, context, instance, conductor_api=None):
return fake_get_instance_nw_info(stubs, num_networks=num_networks,
ips_per_vif=ips_per_vif,
floating_ips_per_fixed_ip=floating_ips_per_fixed_ip,
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index 92ce0815a..3878df531 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -121,6 +121,7 @@ policy_data = """
"compute_extension:extended_server_attributes": "",
"compute_extension:extended_status": "",
"compute_extension:extended_availability_zone": "",
+ "compute_extension:extended_ips": "",
"compute_extension:fixed_ips": "",
"compute_extension:flavor_access": "",
"compute_extension:flavor_disabled": "",
@@ -142,6 +143,8 @@ policy_data = """
"compute_extension:hide_server_addresses": "",
"compute_extension:hosts": "",
"compute_extension:hypervisors": "",
+ "compute_extension:instance_actions": "",
+ "compute_extension:instance_actions:events": "is_admin:True",
"compute_extension:instance_usage_audit_log": "",
"compute_extension:keypairs": "",
"compute_extension:multinic": "",
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index c7430ee6d..0d8a502a5 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -17,12 +17,17 @@
import uuid
from nova import exception
+from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('cinder_cross_az_attach',
+ 'nova.volume.cinder')
+
class fake_volume():
user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
@@ -175,7 +180,7 @@ class API(object):
LOG.info('deleting volume %s', volume['id'])
self.volume_list = [v for v in self.volume_list if v != volume]
- def check_attach(self, context, volume):
+ def check_attach(self, context, volume, instance=None):
if volume['status'] != 'available':
msg = _("status must be available")
msg = "%s" % volume
@@ -183,6 +188,10 @@ class API(object):
if volume['attach_status'] == 'attached':
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
+ if instance and not CONF.cinder_cross_az_attach:
+ if instance['availability_zone'] != volume['availability_zone']:
+ msg = _("Instance and volume not in same availability_zone")
+ raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
if volume['status'] == "available":
diff --git a/nova/tests/hyperv/fake.py b/nova/tests/hyperv/fake.py
index 9890a5462..e0e5a6bbe 100644
--- a/nova/tests/hyperv/fake.py
+++ b/nova/tests/hyperv/fake.py
@@ -23,24 +23,50 @@ class PathUtils(object):
def open(self, path, mode):
return io.BytesIO(b'fake content')
- def get_instances_path(self):
- return 'C:\\FakePath\\'
+ def exists(self, path):
+ return False
+
+ def makedirs(self, path):
+ pass
+
+ def remove(self, path):
+ pass
+
+ def rename(self, src, dest):
+ pass
+
+ def copyfile(self, src, dest):
+ pass
+
+ def copy(self, src, dest):
+ pass
+
+ def rmtree(self, path):
+ pass
+
+ def get_instances_dir(self, remote_server=None):
+ return 'C:\\FakeInstancesPath\\'
+
+ def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
+ remove_dir=False):
+ return os.path.join(self.get_instances_dir(), instance_name, '_revert')
- def get_instance_path(self, instance_name):
- return os.path.join(self.get_instances_path(), instance_name)
+ def get_instance_dir(self, instance_name, remote_server=None,
+ create_dir=True, remove_dir=False):
+ return os.path.join(self.get_instances_dir(remote_server),
+ instance_name)
def get_vhd_path(self, instance_name):
- instance_path = self.get_instance_path(instance_name)
- return os.path.join(instance_path, instance_name + ".vhd")
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'root.vhd')
- def get_base_vhd_path(self, image_name):
- base_dir = os.path.join(self.get_instances_path(), '_base')
- return os.path.join(base_dir, image_name + ".vhd")
+ def get_base_vhd_dir(self):
+ return os.path.join(self.get_instances_dir(), '_base')
- def make_export_path(self, instance_name):
- export_folder = os.path.join(self.get_instances_path(), "export",
- instance_name)
- return export_folder
+ def get_export_dir(self, instance_name):
+ export_dir = os.path.join(self.get_instances_dir(), 'export',
+ instance_name)
+ return export_dir
def vhd_exists(self, path):
return False
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
index 25915610d..413f3ce95 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
@@ -2,8 +2,7 @@
"server": {
"updated": "%(timestamp)s",
"created": "%(timestamp)s",
- "OS-EXT-AZ:availability_zone": null,
- "OS-EXT-AZ:host_availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "nova",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
index 1cdbd2012..849b9b750 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(uuid)s" OS-EXT-AZ:availability_zone="None" OS-EXT-AZ:host_availability_zone="nova">
+<server xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(uuid)s" OS-EXT-AZ:availability_zone="nova">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
index 895f0a514..362c85085 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
@@ -3,8 +3,7 @@
{
"updated": "%(timestamp)s",
"created": "%(timestamp)s",
- "OS-EXT-AZ:availability_zone": null,
- "OS-EXT-AZ:host_availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "nova",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
index 15cd9b1e1..9cd820cd0 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1"> <server status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-AZ:availability_zone="None" OS-EXT-AZ:host_availability_zone="nova">
+<servers xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1"> <server status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-AZ:availability_zone="nova">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl
new file mode 100644
index 000000000..bea96d4f6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl
new file mode 100644
index 000000000..d3f14f6ed
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl
new file mode 100644
index 000000000..37487e49f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "servers": [
+ {
+ "status": "ACTIVE",
+ "updated": "%(timestamp)s",
+ "user_id": "fake",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "created": "%(timestamp)s",
+ "name": "new-server-test",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "id": "%(uuid)s",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "tenant_id": "openstack",
+ "progress": 0,
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "metadata": {
+ "My Server Name": "Apache1"
+ }
+ }]
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl
new file mode 100644
index 000000000..5fc5c1f93
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
index c70192949..c70192949 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
index beec3a12a..beec3a12a 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
index 1fb8e1a47..1fb8e1a47 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
index 1811882a2..1811882a2 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
new file mode 100644
index 000000000..7ac35024b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-EXT-STS:power_state": 1,
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
new file mode 100644
index 000000000..f594be120
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-STS:vm_state="active" OS-EXT-STS:task_state="None" OS-EXT-STS:power_state="1">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl
deleted file mode 100644
index 8b97dc28d..000000000
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "servers": [
- {
- "id": "%(id)s",
- "links": [
- {
- "href": "%(host)s/v2/openstack/servers/%(id)s",
- "rel": "self"
- },
- {
- "href": "%(host)s/openstack/servers/%(id)s",
- "rel": "bookmark"
- }
- ],
- "name": "new-server-test"
- }
- ]
-}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl
deleted file mode 100644
index 03bee03a6..000000000
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server name="new-server-test" id="%(id)s">
- <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
- <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
- </server>
-</servers>
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 35d50d025..17914de42 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -25,6 +25,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "OS-EXT-IPS",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedIps",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_ips/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "OS-EXT-SRV-ATTR",
"description": "%(text)s",
"links": [],
@@ -455,6 +463,14 @@
"name": "Volumes",
"namespace": "http://docs.openstack.org/compute/ext/volumes/api/v1.1",
"updated": "%(timestamp)s"
+ },
+ {
+ "alias": "os-instance-actions",
+ "description": "%(text)s",
+ "links": [],
+ "name": "InstanceActions",
+ "namespace": "http://docs.openstack.org/compute/ext/instance-actions/api/v1.1",
+ "updated": "%(timestamp)s"
}
]
}
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index 2adc5988c..4492ed3aa 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -9,6 +9,9 @@
<extension alias="OS-EXT-AZ" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" name="ExtendedAvailabilityZone">
<description>%(text)s</description>
</extension>
+ <extension alias="OS-EXT-IPS" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" name="ExtendedIps">
+ <description>%(text)s</description>
+ </extension>
<extension alias="OS-EXT-SRV-ATTR" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedServerAttributes">
<description>%(text)s</description>
</extension>
@@ -171,4 +174,7 @@
<extension alias="os-volumes" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/volumes/api/v1.1" name="Volumes">
<description>%(text)s</description>
</extension>
+ <extension alias="os-instance-actions" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/instance-actions/api/v1.1" name="InstanceActions">
+ <description>%(text)s</description>
+ </extension>
</extensions>
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
index 85fc6f605..ccefc2dc7 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
@@ -1,8 +1,7 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": null,
- "OS-EXT-AZ:host_availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "%(compute_host)s",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -14,6 +13,7 @@
"addresses": {
"private": [
{
+ "OS-EXT-IPS:type": "fixed",
"addr": "%(ip)s",
"version": 4
}
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
index bd73accda..973305854 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-EXT-AZ:availability_zone="None" OS-EXT-AZ:host_availability_zone="nova" OS-DCF:diskConfig="AUTO">
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-EXT-AZ:availability_zone="nova" OS-DCF:diskConfig="AUTO">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
@@ -11,7 +11,7 @@
</metadata>
<addresses>
<network id="private">
- <ip version="4" addr="%(ip)s"/>
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="%(ip)s"/>
</network>
</addresses>
<atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
index a4918203b..d50088837 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
@@ -2,8 +2,7 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-AZ:availability_zone": null,
- "OS-EXT-AZ:host_availability_zone": "nova",
+ "OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "%(compute_host)s",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
@@ -15,6 +14,7 @@
"addresses": {
"private": [
{
+ "OS-EXT-IPS:type": "fixed",
"addr": "%(ip)s",
"version": 4
}
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
index d26eb38ef..e2166c2ff 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-EXT-AZ:availability_zone="None" OS-EXT-AZ:host_availability_zone="nova" OS-DCF:diskConfig="AUTO">
+<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-IPS="http://docs.openstack.org/compute/ext/extended_ips/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-EXT-AZ:availability_zone="nova" OS-DCF:diskConfig="AUTO">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
@@ -12,7 +12,7 @@
</metadata>
<addresses>
<network id="private">
- <ip version="4" addr="%(ip)s"/>
+ <ip OS-EXT-IPS:type="fixed" version="4" addr="%(ip)s"/>
</network>
</addresses>
<atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
new file mode 100644
index 000000000..ea47da06c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
@@ -0,0 +1,55 @@
+{
+ "server": {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "%(cdrive)s",
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl
new file mode 100644
index 000000000..c6fb338c7
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" config_drive="%(cdrive)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-config-drive/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-config-drive/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-config-drive/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-config-drive/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl b/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
new file mode 100644
index 000000000..535d00410
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "config_drive": "%(cdrive)s",
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl b/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl
new file mode 100644
index 000000000..c2b2fa3ba
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" config_drive="%(cdrive)s">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
new file mode 100644
index 000000000..7dc33ddb1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "dns_entry" :
+ {
+ "ip": "%(ip)s",
+ "dns_type": "%(dns_type)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
new file mode 100644
index 000000000..bd62d3418
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry>
+ <ip>%(ip)s</ip>
+ <dns_type>%(dns_type)s</dns_type>
+</dns_entry>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
new file mode 100644
index 000000000..3ec0743ba
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": "%(dns_type)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
new file mode 100644
index 000000000..38a659b78
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry ip="%(ip)s" domain="%(domain)s" type="%(dns_type)s" id="None" name="%(name)s"/>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
new file mode 100644
index 000000000..db73be14a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "domain_entry" :
+ {
+ "domain": "%(domain)s",
+ "scope": "%(scope)s",
+ "project": "%(project)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
new file mode 100644
index 000000000..40866a537
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry>
+ <domain>%(domain)s</domain>
+ <scope>%(scope)s</scope>
+ <project>%(project)s</project>
+</domain_entry>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
new file mode 100644
index 000000000..a14d395d2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "domain_entry": {
+ "availability_zone": null,
+ "domain": "%(domain)s",
+ "project": "%(project)s",
+ "scope": "%(scope)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
new file mode 100644
index 000000000..1759c403a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry project="%(project)s" scope="%(scope)s" domain="%(domain)s" availability_zone="None"/>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
new file mode 100644
index 000000000..8edd0603f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": null
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
new file mode 100644
index 000000000..a889ef6e2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<dns_entry ip="%(ip)s" domain="%(domain)s" type="None" id="None" name="%(name)s"/>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
new file mode 100644
index 000000000..831cda7b5
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "dns_entries": [
+ {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": null
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
new file mode 100644
index 000000000..bf7788f94
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entries>
+ <dns_entry ip="%(ip)s" domain="%(domain)s" type="None" id="None" name="%(name)s"/>
+</dns_entries>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
new file mode 100644
index 000000000..a6055cfec
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "domain_entries": [
+ {
+ "availability_zone": null,
+ "domain": "%(domain)s",
+ "project": "%(project)s",
+ "scope": "%(scope)s"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
new file mode 100644
index 000000000..e57c290cb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entries>
+ <domain_entry project="%(project)s" scope="%(scope)s" domain="%(domain)s" availability_zone="None"/>
+</domain_entries>
diff --git a/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
new file mode 100644
index 000000000..6ba99d264
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
@@ -0,0 +1,27 @@
+{
+ "instanceAction": {
+ "action": "%(action)s",
+ "instance_uuid": "%(instance_uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(start_time)s",
+ "message": "",
+ "events": [
+ {
+ "event": "%(event)s",
+ "start_time": "%(timestamp)s",
+ "finish_time": "%(timestamp)s",
+ "result": "%(result)s",
+ "traceback": ""
+ },
+ {
+ "event": "%(event)s",
+ "start_time": "%(timestamp)s",
+ "finish_time": "%(timestamp)s",
+ "result": "%(result)s",
+ "traceback": ""
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl
new file mode 100644
index 000000000..ef4b7b003
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instanceAction action="%(action)s" instance_uuid="%(instance_uuid)s" request_id="%(request_id)s" user_id="%(integer_id)s" project_id="%(integer_id)s" start_time="%(start_time)s" message="">
+ <events event="%(event)s" start_time="%(timestamp)s" finish_time="%(timestamp)s" result="%(result)s" traceback=""/>
+ <events event="%(event)s" start_time="%(timestamp)s" finish_time="%(timestamp)s" result="%(result)s" traceback=""/>
+</instanceAction>
diff --git a/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
new file mode 100644
index 000000000..9f64a1b29
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "instanceActions": [
+ {
+ "action": "%(action)s",
+ "instance_uuid": "%(uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(timestamp)s",
+ "message": ""
+ },
+ {
+ "action": "%(action)s",
+ "instance_uuid": "%(uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(timestamp)s",
+ "message": ""
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl
new file mode 100644
index 000000000..943b1ba74
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instanceActions>
+ <instanceAction action="%(action)s" instance_uuid="%(uuid)s" request_id="%(request_id)s" user_id="%(integer_id)s" project_id="%(integer_id)s" start_time="%(timestamp)s" message=""/>
+ <instanceAction action="%(action)s" instance_uuid="%(uuid)s" request_id="%(request_id)s" user_id="%(integer_id)s" project_id="%(integer_id)s" start_time="%(timestamp)s" message=""/>
+</instanceActions>
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-put-req.json.tpl b/nova/tests/integrated/api_samples/os-services/service-disable-put-req.json.tpl
new file mode 100644
index 000000000..4d48af1b8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-services/service-disable-put-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host)s",
+ "service": "%(service)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-put-resp.json.tpl b/nova/tests/integrated/api_samples/os-services/service-disable-put-resp.json.tpl
new file mode 100644
index 000000000..8219a43f6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-services/service-disable-put-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "disabled": true,
+ "host": "%(host)s",
+ "service": "%(service)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-services/service-enable-put-req.json.tpl b/nova/tests/integrated/api_samples/os-services/service-enable-put-req.json.tpl
new file mode 100644
index 000000000..4d48af1b8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-services/service-enable-put-req.json.tpl
@@ -0,0 +1,4 @@
+{
+ "host": "%(host)s",
+ "service": "%(service)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-services/service-enable-put-resp.json.tpl b/nova/tests/integrated/api_samples/os-services/service-enable-put-resp.json.tpl
new file mode 100644
index 000000000..079b9c76e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-services/service-enable-put-resp.json.tpl
@@ -0,0 +1,5 @@
+{
+ "disabled": false,
+ "host": "%(host)s",
+ "service": "%(service)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-services/services-list-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-services/services-list-get-resp.json.tpl
new file mode 100644
index 000000000..cc6101338
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-services/services-list-get-resp.json.tpl
@@ -0,0 +1,36 @@
+{
+ "services": [
+ {
+ "binary": "nova-scheduler",
+ "host": "host1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(timestamp)s",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host1",
+ "state": "up",
+ "status": "disabled",
+ "updated_at": "%(timestamp)s",
+ "zone": "nova"
+ },
+ {
+ "binary": "nova-scheduler",
+ "host": "host2",
+ "state": "down",
+ "status": "enabled",
+ "updated_at": "%(timestamp)s",
+ "zone": "internal"
+ },
+ {
+ "binary": "nova-compute",
+ "host": "host2",
+ "state": "down",
+ "status": "disabled",
+ "updated_at": "%(timestamp)s",
+ "zone": "nova"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index f37b3ac5d..97e38fa0c 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -14,6 +14,7 @@
# under the License.
import base64
+import copy
import datetime
import inspect
import json
@@ -46,10 +47,14 @@ from nova.servicegroup import api as service_group_api
from nova import test
from nova.tests.api.openstack.compute.contrib import test_fping
from nova.tests.api.openstack.compute.contrib import test_networks
+from nova.tests.api.openstack.compute.contrib import test_services
+from nova.tests.api.openstack import fakes
from nova.tests.baremetal.db import base as bm_db_base
+from nova.tests import fake_instance_actions
from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests.integrated import integrated_helpers
+from nova.tests import utils as test_utils
from nova import utils
CONF = cfg.CONF
@@ -377,12 +382,9 @@ class ApiSamplesTrap(ApiSampleTestBase):
# NOT be allowed to grow, and should shrink to zero (and be
# removed) soon.
do_not_approve_additions = []
- do_not_approve_additions.append('os-config-drive')
do_not_approve_additions.append('os-create-server-ext')
do_not_approve_additions.append('os-flavor-access')
- do_not_approve_additions.append('os-floating-ip-dns')
do_not_approve_additions.append('os-hypervisors')
- do_not_approve_additions.append('os-services')
do_not_approve_additions.append('os-volumes')
tests = self._get_extensions_tested()
@@ -1175,7 +1177,7 @@ class ExtendedServerAttributesJsonTest(ServersSampleBase):
".extended_server_attributes" + \
".Extended_server_attributes"
- def test_extended_server_attrs_get(self):
+ def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
@@ -1184,10 +1186,10 @@ class ExtendedServerAttributesJsonTest(ServersSampleBase):
subs['id'] = uuid
subs['instance_name'] = 'instance-\d{8}'
subs['hypervisor_hostname'] = r'[\w\.\-]+'
- return self._verify_response('extended-server-attrs-get',
+ return self._verify_response('server-get-resp',
subs, response)
- def test_extended_server_attrs_list(self):
+ def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
@@ -1196,7 +1198,7 @@ class ExtendedServerAttributesJsonTest(ServersSampleBase):
subs['id'] = uuid
subs['instance_name'] = 'instance-\d{8}'
subs['hypervisor_hostname'] = r'[\w\.\-]+'
- return self._verify_response('extended-server-attrs-list',
+ return self._verify_response('servers-detail-resp',
subs, response)
@@ -1909,6 +1911,61 @@ class MultipleCreateXmlTest(MultipleCreateJsonTest):
ctype = 'xml'
+class ServicesJsonTest(ApiSampleTestBase):
+ extension_name = "nova.api.openstack.compute.contrib.services.Services"
+
+ def setUp(self):
+ super(ServicesJsonTest, self).setUp()
+ self.stubs.Set(db, "service_get_all",
+ test_services.fake_service_get_all)
+ self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
+ self.stubs.Set(db, "service_get_by_args",
+ test_services.fake_service_get_by_host_binary)
+ self.stubs.Set(db, "service_update",
+ test_services.fake_service_update)
+
+ def tearDown(self):
+ super(ServicesJsonTest, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_services_list(self):
+ """Return a list of all agent builds."""
+ response = self._do_get('os-services')
+ self.assertEqual(response.status, 200)
+ subs = {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up'}
+ subs.update(self._get_regexes())
+ return self._verify_response('services-list-get-resp',
+ subs, response)
+
+ def test_service_enable(self):
+ """Enable an existing agent build."""
+ subs = {"host": "host1",
+ 'service': 'nova-compute'}
+ response = self._do_put('/os-services/enable',
+ 'service-enable-put-req', subs)
+ self.assertEqual(response.status, 200)
+ subs = {"host": "host1",
+ "service": "nova-compute"}
+ return self._verify_response('service-enable-put-resp',
+ subs, response)
+
+ def test_service_disable(self):
+ """Disable an existing agent build."""
+ subs = {"host": "host1",
+ 'service': 'nova-compute'}
+ response = self._do_put('/os-services/disable',
+ 'service-disable-put-req', subs)
+ self.assertEqual(response.status, 200)
+ subs = {"host": "host1",
+ "service": "nova-compute"}
+ return self._verify_response('service-disable-put-resp',
+ subs, response)
+
+
class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.simple_tenant_usage."
"Simple_tenant_usage")
@@ -2218,17 +2275,45 @@ class QuotasSampleXmlTests(QuotasSampleJsonTests):
ctype = "xml"
-class ExtendedStatusSampleJsonTests(ServersSampleBase):
+class ExtendedIpsSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_status.Extended_status")
+ ".extended_ips.Extended_ips")
def test_show(self):
uuid = self._post_server()
- response = self._do_get('servers')
+ response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
- return self._verify_response('servers-list-resp', subs, response)
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ return self._verify_response('server-get-resp', subs, response)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('servers-detail-resp', subs, response)
+
+
+class ExtendedIpsSampleXmlTests(ExtendedIpsSampleJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedStatusSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_status.Extended_status")
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('server-get-resp', subs, response)
def test_detail(self):
uuid = self._post_server()
@@ -2886,7 +2971,7 @@ class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
".extended_availability_zone"
".Extended_availability_zone")
- def test_get(self):
+ def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status, 200)
@@ -2938,3 +3023,190 @@ class EvacuateJsonTest(ServersSampleBase):
class EvacuateXmlTest(EvacuateJsonTest):
ctype = 'xml'
+
+
+class FloatingIpDNSJsonTest(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib.floating_ip_dns."
+ "Floating_ip_dns")
+
+ domain = 'domain1.example.org'
+ name = 'instance1'
+ scope = 'public'
+ project = 'project1'
+ dns_type = 'A'
+ ip = '192.168.1.1'
+
+ def _create_or_update(self):
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ response = self._do_put('os-floating-ip-dns/%s' % self.domain,
+ 'floating-ip-dns-create-or-update-req', subs)
+ self.assertEqual(response.status, 200)
+ self._verify_response('floating-ip-dns-create-or-update-resp', subs,
+ response)
+
+ def _create_or_update_entry(self):
+ subs = {'ip': self.ip, 'dns_type': self.dns_type}
+ response = self._do_put('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name),
+ 'floating-ip-dns-create-or-update-entry-req',
+ subs)
+ self.assertEqual(response.status, 200)
+ subs.update({'name': self.name, 'domain': self.domain})
+ self._verify_response('floating-ip-dns-create-or-update-entry-resp',
+ subs, response)
+
+ def test_floating_ip_dns_list(self):
+ self._create_or_update()
+ response = self._do_get('os-floating-ip-dns')
+ self.assertEqual(response.status, 200)
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ return self._verify_response('floating-ip-dns-list-resp', subs,
+ response)
+
+ def test_floating_ip_dns_create_or_update(self):
+ self._create_or_update()
+
+ def test_floating_ip_dns_delete(self):
+ self._create_or_update()
+ response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
+ self.assertEqual(response.status, 202)
+
+ def test_floating_ip_dns_create_or_update_entry(self):
+ self._create_or_update_entry()
+
+ def test_floating_ip_dns_entry_get(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ self.assertEqual(response.status, 200)
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ return self._verify_response('floating-ip-dns-entry-get-resp', subs,
+ response)
+
+ def test_floating_ip_dns_entry_delete(self):
+ self._create_or_update_entry()
+ response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ self.assertEqual(response.status, 202)
+
+ def test_floating_ip_dns_entry_list(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.ip))
+ self.assertEqual(response.status, 200)
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ return self._verify_response('floating-ip-dns-entry-list-resp', subs,
+ response)
+
+
+class FloatingIpDNSXmlTest(FloatingIpDNSJsonTest):
+ ctype = 'xml'
+
+
+class InstanceActionsSampleJsonTest(ApiSampleTestBase):
+ extension_name = ('nova.api.openstack.compute.contrib.instance_actions.'
+ 'Instance_actions')
+
+ def setUp(self):
+ super(InstanceActionsSampleJsonTest, self).setUp()
+ self.actions = fake_instance_actions.FAKE_ACTIONS
+ self.events = fake_instance_actions.FAKE_EVENTS
+ self.instance = test_utils.get_test_instance()
+
+ def fake_instance_action_get_by_request_id(context, uuid, request_id):
+ return copy.deepcopy(self.actions[uuid][request_id])
+
+ def fake_instance_actions_get(context, uuid):
+ return [copy.deepcopy(value) for value in
+ self.actions[uuid].itervalues()]
+
+ def fake_instance_action_events_get(context, action_id):
+ return copy.deepcopy(self.events[action_id])
+
+ def fake_instance_get_by_uuid(context, instance_id):
+ return self.instance
+
+ self.stubs.Set(db, 'action_get_by_request_id',
+ fake_instance_action_get_by_request_id)
+ self.stubs.Set(db, 'actions_get', fake_instance_actions_get)
+ self.stubs.Set(db, 'action_events_get',
+ fake_instance_action_events_get)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+
+ def test_instance_action_get(self):
+ fake_uuid = fake_instance_actions.FAKE_UUID
+ fake_request_id = fake_instance_actions.FAKE_REQUEST_ID1
+ fake_action = self.actions[fake_uuid][fake_request_id]
+
+ response = self._do_get('servers/%s/os-instance-actions/%s' %
+ (fake_uuid, fake_request_id))
+ subs = self._get_regexes()
+ subs['action'] = '(reboot)|(resize)'
+ subs['instance_uuid'] = fake_uuid
+ subs['integer_id'] = '[0-9]+'
+ subs['request_id'] = fake_action['request_id']
+ subs['start_time'] = fake_action['start_time']
+ subs['result'] = '(Success)|(Error)'
+ subs['event'] = '(schedule)|(compute_create)'
+ return self._verify_response('instance-action-get-resp', subs,
+ response)
+
+ def test_instance_actions_list(self):
+ fake_uuid = fake_instance_actions.FAKE_UUID
+ response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
+ subs = self._get_regexes()
+ subs['action'] = '(reboot)|(resize)'
+ subs['integer_id'] = '[0-9]+'
+ subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
+ '-[0-9a-f]{4}-[0-9a-f]{12}')
+ return self._verify_response('instance-actions-list-resp', subs,
+ response)
+
+
+class InstanceActionsSampleXmlTest(InstanceActionsSampleJsonTest):
+ ctype = 'xml'
+
+
+class ConfigDriveSampleJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.config_drive."
+ "Config_drive")
+
+ def setUp(self):
+ super(ConfigDriveSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+
+ def test_config_drive_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+        # config drive can be a uuid or empty value
+ subs['cdrive'] = '(%s)?' % subs['uuid']
+ return self._verify_response('server-config-drive-get-resp', subs,
+ response)
+
+ def test_config_drive_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+        # config drive can be a uuid or empty value
+ subs['cdrive'] = '(%s)?' % subs['uuid']
+ return self._verify_response('servers-config-drive-details-resp',
+ subs, response)
+
+
+class ConfigDriveSampleXmlTest(ConfigDriveSampleJsonTest):
+ ctype = 'xml'
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index bc21b80ad..50c98e78c 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -24,6 +24,7 @@ from nova import context
from nova import db
from nova.network import driver
from nova.network import linux_net
+from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -31,6 +32,7 @@ from nova import test
from nova import utils
LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
HOST = "testhost"
@@ -472,6 +474,71 @@ class LinuxNetworkTestCase(test.TestCase):
driver.plug(network, "fakemac")
self.assertEqual(info['passed_interface'], "override_interface")
+ def _test_dnsmasq_execute(self, extra_expected=None):
+ network_ref = {'id': 'fake',
+ 'label': 'fake',
+ 'multi_host': False,
+ 'cidr': '10.0.0.0/24',
+ 'dns1': '8.8.4.4',
+ 'dhcp_start': '1.0.0.2',
+ 'dhcp_server': '10.0.0.1'}
+ executes = []
+
+ def fake_execute(*args, **kwargs):
+ executes.append(args)
+ return "", ""
+
+ self.stubs.Set(linux_net, '_execute', fake_execute)
+
+ self.stubs.Set(os, 'chmod', lambda *a, **kw: None)
+ self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None)
+ self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None)
+ dev = 'br100'
+ linux_net.restart_dhcp(self.context, dev, network_ref)
+ expected = ['env',
+ 'CONFIG_FILE=%s' % CONF.dhcpbridge_flagfile,
+ 'NETWORK_ID=fake',
+ 'dnsmasq',
+ '--strict-order',
+ '--bind-interfaces',
+ '--conf-file=%s' % CONF.dnsmasq_config_file,
+ '--domain=%s' % CONF.dhcp_domain,
+ '--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'),
+ '--listen-address=%s' % network_ref['dhcp_server'],
+ '--except-interface=lo',
+ "--dhcp-range=set:'%s',%s,static,%ss" % (network_ref['label'],
+ network_ref['dhcp_start'],
+ CONF.dhcp_lease_time),
+ '--dhcp-lease-max=256',
+ '--dhcp-hostsfile=%s' % linux_net._dhcp_file(dev, 'conf'),
+ '--dhcp-script=%s' % CONF.dhcpbridge,
+ '--leasefile-ro']
+ if extra_expected:
+ expected += extra_expected
+ self.assertEqual([tuple(expected)], executes)
+
+ def test_dnsmasq_execute(self):
+ self._test_dnsmasq_execute()
+
+ def test_dnsmasq_execute_dns_servers(self):
+ self.flags(dns_server=['1.1.1.1', '2.2.2.2'])
+ expected = [
+ '--no-hosts',
+ '--no-resolv',
+ '--server=1.1.1.1',
+ '--server=2.2.2.2',
+ ]
+ self._test_dnsmasq_execute(expected)
+
+ def test_dnsmasq_execute_use_network_dns_servers(self):
+ self.flags(use_network_dns_servers=True)
+ expected = [
+ '--no-hosts',
+ '--no-resolv',
+ '--server=8.8.4.4',
+ ]
+ self._test_dnsmasq_execute(expected)
+
def test_isolated_host(self):
self.flags(fake_network=False,
share_dhcp_address=True)
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 1805044a1..f3f306694 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -189,6 +189,16 @@ class TestQuantumv2(test.TestCase):
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2 = []
+ self.subnet_data_n = [{'id': 'my_subid1',
+ 'cidr': '10.0.1.0/24',
+ 'network_id': 'my_netid1',
+ 'gateway_ip': '10.0.1.1',
+ 'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
+ {'id': 'my_subid2',
+ 'cidr': '20.0.1.0/24',
+ 'network_id': 'my_netid2',
+ 'gateway_ip': '20.0.1.1',
+ 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2.append({'id': 'my_subid2',
'cidr': '10.0.2.0/24',
'network_id': 'my_netid2',
@@ -1010,7 +1020,7 @@ class TestQuantumv2(test.TestCase):
network_id = 'my_netid1'
search_opts = {'network_id': network_id}
self.moxed_client.list_subnets(
- **search_opts).AndReturn({'subnets': self.subnet_data1})
+ **search_opts).AndReturn({'subnets': self.subnet_data_n})
zone = 'compute:%s' % self.instance['availability_zone']
search_opts = {'device_id': self.instance['uuid'],
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index b4d73ec0c..4b07581fb 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -40,6 +40,16 @@ def fake_get_filtered_hosts(hosts, filter_properties):
return list(hosts)
+def fake_get_group_filtered_hosts(hosts, filter_properties):
+ group_hosts = filter_properties.get('group_hosts') or []
+ if group_hosts:
+ hosts = list(hosts)
+ hosts.pop(0)
+ return hosts
+ else:
+ return list(hosts)
+
+
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
@@ -480,3 +490,169 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.MigrationError,
self.driver._assert_compute_node_has_enough_memory,
self.context, instance, dest)
+
+ def test_basic_schedule_run_instances_anti_affinity(self):
+ filter_properties = {'scheduler_hints':
+ {'group': 'cats'}}
+ # Request spec 1
+ instance_opts1 = {'project_id': 1, 'os_type': 'Linux',
+ 'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0, 'vcpus': 1,
+ 'system_metadata': {'system': 'metadata'}}
+ request_spec1 = {'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'],
+ 'instance_properties': instance_opts1,
+ 'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0, 'vcpus': 1}}
+ self.next_weight = 1.0
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_group_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ self.mox.StubOutWithMock(driver, 'instance_update_db')
+ self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
+ self.mox.StubOutWithMock(sched, 'group_hosts')
+
+ instance1_1 = {'uuid': 'fake-uuid1-1'}
+ instance1_2 = {'uuid': 'fake-uuid1-2'}
+
+ sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])
+
+ def inc_launch_index1(*args, **kwargs):
+ request_spec1['instance_properties']['launch_index'] = (
+ request_spec1['instance_properties']['launch_index'] + 1)
+
+ expected_metadata = {'system_metadata':
+ {'system': 'metadata', 'group': 'cats'}}
+ driver.instance_update_db(fake_context, instance1_1['uuid'],
+ extra_values=expected_metadata).WithSideEffects(
+ inc_launch_index1).AndReturn(instance1_1)
+ compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host3',
+ instance=instance1_1, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
+ node='node3')
+
+ driver.instance_update_db(fake_context, instance1_2['uuid'],
+ extra_values=expected_metadata).WithSideEffects(
+ inc_launch_index1).AndReturn(instance1_2)
+ compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host4',
+ instance=instance1_2, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
+ node='node4')
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(fake_context, request_spec1,
+ None, None, None, None, filter_properties)
+
+ def test_schedule_host_pool(self):
+ """Make sure the scheduler_host_subset_size property works properly."""
+
+ self.flags(scheduler_host_subset_size=2)
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux'}
+
+ request_spec = dict(instance_properties=instance_properties)
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(len(hosts), 1)
+
+ def test_schedule_large_host_pool(self):
+        """Hosts should still be chosen if pool size
+        is larger than the number of filtered hosts."""
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.flags(scheduler_host_subset_size=20)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux'}
+ request_spec = dict(instance_properties=instance_properties)
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+        # one host should be chosen
+ self.assertEqual(len(hosts), 1)
+
+ def test_schedule_chooses_best_host(self):
+        """If scheduler_host_subset_size is 1, the host with the greatest
+        weight should be returned."""
+
+ self.flags(scheduler_host_subset_size=1)
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ self.next_weight = 50
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ this_weight = self.next_weight
+ self.next_weight = 0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, this_weight)]
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux'}
+
+ request_spec = dict(instance_properties=instance_properties)
+
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEquals(1, len(hosts))
+
+ self.assertEquals(50, hosts[0].weight)
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 230e2ea03..edd2e0d61 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -1400,3 +1400,17 @@ class HostFiltersTestCase(test.TestCase):
{'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_passes(self):
+ filt_cls = self.class_map['GroupAntiAffinityFilter']()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_hosts': []}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_hosts': ['host2']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_fails(self):
+ filt_cls = self.class_map['GroupAntiAffinityFilter']()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_hosts': ['host1']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index 91e719ab6..088aa4da9 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -45,10 +45,7 @@ class HostManagerTestCase(test.TestCase):
self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
'fake-node') for x in xrange(1, 5)]
-
- def tearDown(self):
- timeutils.clear_time_override()
- super(HostManagerTestCase, self).tearDown()
+ self.addCleanup(timeutils.clear_time_override)
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3')
@@ -268,6 +265,64 @@ class HostManagerTestCase(test.TestCase):
8388608)
+class HostManagerChangedNodesTestCase(test.TestCase):
+    """Test case for HostManager behavior when compute nodes change."""
+
+ def setUp(self):
+ super(HostManagerChangedNodesTestCase, self).setUp()
+ self.host_manager = host_manager.HostManager()
+ self.fake_hosts = [
+ host_manager.HostState('host1', 'node1'),
+ host_manager.HostState('host2', 'node2'),
+ host_manager.HostState('host3', 'node3'),
+ host_manager.HostState('host4', 'node4')
+ ]
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_get_all_host_states(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 4)
+
+ def test_get_all_host_states_after_delete_one(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # remove node4 for second call
+ running_nodes = [n for n in fakes.COMPUTE_NODES
+ if n.get('hypervisor_hostname') != 'node4']
+ db.compute_node_get_all(context).AndReturn(running_nodes)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 3)
+
+ def test_get_all_host_states_after_delete_all(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # remove all nodes for second call
+ db.compute_node_get_all(context).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 0)
+
+
class HostStateTestCase(test.TestCase):
"""Test case for HostState class."""
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 44e1f3537..01d3f6a50 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -24,6 +24,7 @@ import mox
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
@@ -199,6 +200,38 @@ class SchedulerManagerTestCase(test.TestCase):
self.manager.run_instance(self.context, request_spec,
None, None, None, None, {})
+ def test_live_migration_compute_service_notavailable(self):
+ inst = {"uuid": "fake-instance-id",
+ "vm_state": vm_states.ACTIVE,
+ "task_state": task_states.MIGRATING, }
+
+ dest = 'fake_host'
+ block_migration = False
+ disk_over_commit = False
+
+ self._mox_schedule_method_helper('schedule_live_migration')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.manager.driver.schedule_live_migration(self.context,
+ inst, dest, block_migration, disk_over_commit).AndRaise(
+ exception.ComputeServiceUnavailable(host="src"))
+ db.instance_update_and_get_original(self.context, inst["uuid"],
+ {"vm_state": inst['vm_state'],
+ "task_state": None,
+ "expected_task_state": task_states.MIGRATING,
+ }).AndReturn((inst, inst))
+ compute_utils.add_instance_fault_from_exc(self.context,
+ mox.IsA(conductor_api.LocalAPI), inst,
+ mox.IsA(exception.ComputeServiceUnavailable),
+ mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.manager.live_migration,
+ self.context, inst, dest, block_migration,
+ disk_over_commit)
+
def test_prep_resize_no_valid_host_back_in_active_state(self):
fake_instance_uuid = 'fake-instance-id'
fake_instance = {'uuid': fake_instance_uuid}
@@ -510,6 +543,29 @@ class SchedulerTestCase(test.TestCase):
block_migration=block_migration,
disk_over_commit=disk_over_commit)
+ def test_live_migration_compute_dest_not_exist(self):
+ # Raise exception when dest compute node does not exist.
+
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+
+ self.driver._live_migration_src_check(self.context, instance)
+ # Compute down
+ db.service_get_by_compute_host(self.context,
+ dest).AndRaise(exception.NotFound())
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.driver.schedule_live_migration, self.context,
+ instance=instance, dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+
def test_live_migration_compute_dest_not_alive(self):
# Raise exception when dest compute node is not alive.
diff --git a/nova/tests/servicegroup/test_zk_driver.py b/nova/tests/servicegroup/test_zk_driver.py
new file mode 100644
index 000000000..753153bb5
--- /dev/null
+++ b/nova/tests/servicegroup/test_zk_driver.py
@@ -0,0 +1,65 @@
+# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
+# Copyright (c) IBM 2012 Alexey Roytman <roytman at il dot ibm dot com>.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test the ZooKeeper driver for servicegroup.
+
+You need to install ZooKeeper locally and related dependencies
+to run the test. It's unclear how to install python-zookeeper lib
+in venv so you might have to run the test without it.
+
+To set up in Ubuntu 12.04:
+$ sudo apt-get install zookeeper zookeeperd python-zookeeper
+$ sudo pip install evzookeeper
+$ nosetests nova.tests.servicegroup.test_zk_driver
+"""
+
+import eventlet
+
+from nova import servicegroup
+from nova import test
+
+
+class ZKServiceGroupTestCase(test.TestCase):
+
+ def setUp(self):
+ super(ZKServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ try:
+ from nova.servicegroup.drivers import zk
+ _unused = zk
+ except ImportError:
+ self.skipTest("Unable to test due to lack of ZooKeeper")
+ self.flags(servicegroup_driver='zk')
+ self.flags(address='localhost:2181', group="zk")
+
+ def test_join_leave(self):
+ self.servicegroup_api = servicegroup.API()
+ service_id = {'topic': 'unittest', 'host': 'serviceA'}
+ self.servicegroup_api.join(service_id['host'], service_id['topic'])
+ self.assertTrue(self.servicegroup_api.service_is_up(service_id))
+ self.servicegroup_api.leave(service_id['host'], service_id['topic'])
+ # make sure zookeeper is updated and watcher is triggered
+ eventlet.sleep(1)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_id))
+
+ def test_stop(self):
+ self.servicegroup_api = servicegroup.API()
+ service_id = {'topic': 'unittest', 'host': 'serviceA'}
+ pulse = self.servicegroup_api.join(service_id['host'],
+ service_id['topic'], None)
+ self.assertTrue(self.servicegroup_api.service_is_up(service_id))
+ pulse.stop()
+ eventlet.sleep(1)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_id))
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 11c16d6dd..949f54512 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -22,12 +22,13 @@ import random
import StringIO
import boto
+import boto.connection
from boto.ec2 import regioninfo
from boto import exception as boto_exc
# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
-try:
- import boto.connection as httplib
-except ImportError:
+if hasattr(boto.connection, 'HTTPResponse'):
+ httplib = boto.connection
+else:
import httplib
import fixtures
import webob
diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py
index 25df336fb..cec3ca9c1 100644
--- a/nova/tests/test_crypto.py
+++ b/nova/tests/test_crypto.py
@@ -212,3 +212,31 @@ e6fCXWECgYEAqgpGvva5kJ1ISgNwnJbwiNw0sOT9BMOsdNZBElf0kJIIy6FMPvap
def test_ssh_encrypt_failure(self):
self.assertRaises(exception.EncryptionFailure,
crypto.ssh_encrypt_text, '', self.text)
+
+
+class ConversionTests(test.TestCase):
+ k1 = ("ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4CqmrxfU7x4sJrubpMNxeglul+d"
+ "ByrsicnvQcHDEjPzdvoz+BaoAG9bjCA5mCeTBIISsVTVXz/hxNeiuBV6LH/UR/c"
+ "27yl53ypN+821ImoexQZcKItdnjJ3gVZlDob1f9+1qDVy63NJ1c+TstkrCTRVeo"
+ "9VyE7RpdSS4UCiBe8Xwk3RkedioFxePrI0Ktc2uASw2G0G2Rl7RN7KZOJbCivfF"
+ "LQMAOu6e+7fYvuE1gxGHHj7dxaBY/ioGOm1W4JmQ1V7AKt19zTBlZKduN8FQMSF"
+ "r35CDlvoWs0+OP8nwlebKNCi/5sdL8qiSLrAcPB4LqdkAf/blNSVA2Yl83/c4lQ"
+ "== test@test")
+
+ k2 = ("-----BEGIN PUBLIC KEY-----\n"
+ "MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEA4CqmrxfU7x4sJrubpMNx\n"
+ "eglul+dByrsicnvQcHDEjPzdvoz+BaoAG9bjCA5mCeTBIISsVTVXz/hxNeiuBV6L\n"
+ "H/UR/c27yl53ypN+821ImoexQZcKItdnjJ3gVZlDob1f9+1qDVy63NJ1c+TstkrC\n"
+ "TRVeo9VyE7RpdSS4UCiBe8Xwk3RkedioFxePrI0Ktc2uASw2G0G2Rl7RN7KZOJbC\n"
+ "ivfFLQMAOu6e+7fYvuE1gxGHHj7dxaBY/ioGOm1W4JmQ1V7AKt19zTBlZKduN8FQ\n"
+ "MSFr35CDlvoWs0+OP8nwlebKNCi/5sdL8qiSLrAcPB4LqdkAf/blNSVA2Yl83/c4\n"
+ "lQIBIw==\n"
+ "-----END PUBLIC KEY-----\n")
+
+ def test_convert_keys(self):
+ result = crypto.convert_from_sshrsa_to_pkcs8(self.k1)
+ self.assertEqual(result, self.k2)
+
+ def test_convert_failure(self):
+ self.assertRaises(exception.EncryptionFailure,
+ crypto.convert_from_sshrsa_to_pkcs8, '')
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index c6bf2941e..835527219 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -22,10 +22,15 @@
import datetime
import uuid as stdlib_uuid
+from sqlalchemy import MetaData
+from sqlalchemy.schema import Table
+from sqlalchemy.sql.expression import select
+
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import timeutils
from nova import test
from nova.tests import matchers
@@ -36,6 +41,9 @@ CONF = cfg.CONF
CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
+get_engine = db_session.get_engine
+get_session = db_session.get_session
+
class DbApiTestCase(test.TestCase):
def setUp(self):
@@ -685,9 +693,9 @@ class DbApiTestCase(test.TestCase):
db.action_start(ctxt2, action_values)
actions = db.actions_get(ctxt1, uuid1)
- action_id = actions[0]['id']
- action = db.action_get_by_id(ctxt1, uuid1, action_id)
- self.assertEqual('resize', action['action'])
+ request_id = actions[0]['request_id']
+ action = db.action_get_by_request_id(ctxt1, uuid1, request_id)
+ self.assertEqual('run_instance', action['action'])
self.assertEqual(ctxt1.request_id, action['request_id'])
def test_instance_action_event_start(self):
@@ -1791,3 +1799,156 @@ class TaskLogTestCase(test.TestCase):
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['errors'], 1)
+
+
+class ArchiveTestCase(test.TestCase):
+
+ def setUp(self):
+ super(ArchiveTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ engine = get_engine()
+ self.conn = engine.connect()
+ self.metadata = MetaData()
+ self.metadata.bind = engine
+ self.table1 = Table("instance_id_mappings",
+ self.metadata,
+ autoload=True)
+ self.shadow_table1 = Table("shadow_instance_id_mappings",
+ self.metadata,
+ autoload=True)
+ self.table2 = Table("dns_domains",
+ self.metadata,
+ autoload=True)
+ self.shadow_table2 = Table("shadow_dns_domains",
+ self.metadata,
+ autoload=True)
+ self.uuidstrs = []
+ for unused in xrange(6):
+ self.uuidstrs.append(stdlib_uuid.uuid4().hex)
+
+ def tearDown(self):
+ super(ArchiveTestCase, self).tearDown()
+ delete_statement1 = self.table1.delete(
+ self.table1.c.uuid.in_(self.uuidstrs))
+ self.conn.execute(delete_statement1)
+ delete_statement2 = self.shadow_table1.delete(
+ self.shadow_table1.c.uuid.in_(self.uuidstrs))
+ self.conn.execute(delete_statement2)
+ delete_statement3 = self.table2.delete(self.table2.c.domain.in_(
+ self.uuidstrs))
+ self.conn.execute(delete_statement3)
+ delete_statement4 = self.shadow_table2.delete(
+ self.shadow_table2.c.domain.in_(self.uuidstrs))
+ self.conn.execute(delete_statement4)
+
+ def test_archive_deleted_rows(self):
+ # Add 6 rows to table
+ for uuidstr in self.uuidstrs:
+ insert_statement = self.table1.insert().values(uuid=uuidstr)
+ self.conn.execute(insert_statement)
+ # Set 4 to deleted
+ update_statement = self.table1.update().\
+ where(self.table1.c.uuid.in_(self.uuidstrs[:4]))\
+ .values(deleted=True)
+ self.conn.execute(update_statement)
+ query1 = select([self.table1]).where(self.table1.c.uuid.in_(
+ self.uuidstrs))
+ rows1 = self.conn.execute(query1).fetchall()
+ # Verify we have 6 in main
+ self.assertEqual(len(rows1), 6)
+ query2 = select([self.shadow_table1]).\
+ where(self.shadow_table1.c.uuid.in_(self.uuidstrs))
+ rows2 = self.conn.execute(query2).fetchall()
+ # Verify we have 0 in shadow
+ self.assertEqual(len(rows2), 0)
+ # Archive 2 rows
+ db.archive_deleted_rows(self.context, max_rows=2)
+ rows3 = self.conn.execute(query1).fetchall()
+ # Verify we have 4 left in main
+ self.assertEqual(len(rows3), 4)
+ rows4 = self.conn.execute(query2).fetchall()
+ # Verify we have 2 in shadow
+ self.assertEqual(len(rows4), 2)
+ # Archive 2 more rows
+ db.archive_deleted_rows(self.context, max_rows=2)
+ rows5 = self.conn.execute(query1).fetchall()
+ # Verify we have 2 left in main
+ self.assertEqual(len(rows5), 2)
+ rows6 = self.conn.execute(query2).fetchall()
+ # Verify we have 4 in shadow
+ self.assertEqual(len(rows6), 4)
+ # Try to archive more, but there are no deleted rows left.
+ db.archive_deleted_rows(self.context, max_rows=2)
+ rows7 = self.conn.execute(query1).fetchall()
+ # Verify we still have 2 left in main
+ self.assertEqual(len(rows7), 2)
+ rows8 = self.conn.execute(query2).fetchall()
+ # Verify we still have 4 in shadow
+ self.assertEqual(len(rows8), 4)
+
+ def test_archive_deleted_rows_for_table(self):
+ tablename = "instance_id_mappings"
+ # Add 6 rows to table
+ for uuidstr in self.uuidstrs:
+ insert_statement = self.table1.insert().values(uuid=uuidstr)
+ self.conn.execute(insert_statement)
+ # Set 4 to deleted
+ update_statement = self.table1.update().\
+ where(self.table1.c.uuid.in_(self.uuidstrs[:4]))\
+ .values(deleted=True)
+ self.conn.execute(update_statement)
+ query1 = select([self.table1]).where(self.table1.c.uuid.in_(
+ self.uuidstrs))
+ rows1 = self.conn.execute(query1).fetchall()
+ # Verify we have 6 in main
+ self.assertEqual(len(rows1), 6)
+ query2 = select([self.shadow_table1]).\
+ where(self.shadow_table1.c.uuid.in_(self.uuidstrs))
+ rows2 = self.conn.execute(query2).fetchall()
+ # Verify we have 0 in shadow
+ self.assertEqual(len(rows2), 0)
+ # Archive 2 rows
+ db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+ rows3 = self.conn.execute(query1).fetchall()
+ # Verify we have 4 left in main
+ self.assertEqual(len(rows3), 4)
+ rows4 = self.conn.execute(query2).fetchall()
+ # Verify we have 2 in shadow
+ self.assertEqual(len(rows4), 2)
+ # Archive 2 more rows
+ db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+ rows5 = self.conn.execute(query1).fetchall()
+ # Verify we have 2 left in main
+ self.assertEqual(len(rows5), 2)
+ rows6 = self.conn.execute(query2).fetchall()
+ # Verify we have 4 in shadow
+ self.assertEqual(len(rows6), 4)
+ # Try to archive more, but there are no deleted rows left.
+ db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+ rows7 = self.conn.execute(query1).fetchall()
+ # Verify we still have 2 left in main
+ self.assertEqual(len(rows7), 2)
+ rows8 = self.conn.execute(query2).fetchall()
+ # Verify we still have 4 in shadow
+ self.assertEqual(len(rows8), 4)
+
+ def test_archive_deleted_rows_no_id_column(self):
+ uuidstr0 = self.uuidstrs[0]
+ insert_statement = self.table2.insert().values(domain=uuidstr0)
+ self.conn.execute(insert_statement)
+ update_statement = self.table2.update().\
+ where(self.table2.c.domain == uuidstr0).\
+ values(deleted=True)
+ self.conn.execute(update_statement)
+ query1 = select([self.table2], self.table2.c.domain == uuidstr0)
+ rows1 = self.conn.execute(query1).fetchall()
+ self.assertEqual(len(rows1), 1)
+ query2 = select([self.shadow_table2],
+ self.shadow_table2.c.domain == uuidstr0)
+ rows2 = self.conn.execute(query2).fetchall()
+ self.assertEqual(len(rows2), 0)
+ db.archive_deleted_rows(self.context, max_rows=1)
+ rows3 = self.conn.execute(query1).fetchall()
+ self.assertEqual(len(rows3), 0)
+ rows4 = self.conn.execute(query2).fetchall()
+ self.assertEqual(len(rows4), 1)
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index 0c2f90a4d..025d3a454 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -80,6 +80,7 @@ class HyperVAPITestCase(test.TestCase):
self._instance_ide_disks = []
self._instance_ide_dvds = []
self._instance_volume_disks = []
+ self._test_vm_name = None
self._setup_stubs()
@@ -116,6 +117,14 @@ class HyperVAPITestCase(test.TestCase):
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
+ self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
+ self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
+ self._mox.StubOutWithMock(fake.PathUtils, 'copy')
+ self._mox.StubOutWithMock(fake.PathUtils, 'remove')
+ self._mox.StubOutWithMock(fake.PathUtils, 'rename')
+ self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
+ self._mox.StubOutWithMock(fake.PathUtils,
+ 'get_instance_migr_revert_dir')
self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
@@ -137,11 +146,13 @@ class HyperVAPITestCase(test.TestCase):
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_mounted_disk_by_drive_number')
self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
self._mox.StubOutWithMock(hostutils.HostUtils,
@@ -149,6 +160,7 @@ class HyperVAPITestCase(test.TestCase):
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'get_external_vswitch')
@@ -181,11 +193,6 @@ class HyperVAPITestCase(test.TestCase):
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'execute_log_out')
- self._mox.StubOutWithMock(shutil, 'copyfile')
- self._mox.StubOutWithMock(shutil, 'rmtree')
-
- self._mox.StubOutWithMock(os, 'remove')
-
self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
'metadata_for_config_drive')
@@ -332,7 +339,7 @@ class HyperVAPITestCase(test.TestCase):
mox.IsA(str),
mox.IsA(str),
attempts=1)
- os.remove(mox.IsA(str))
+ fake.PathUtils.remove(mox.IsA(str))
m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
mox.IsA(str),
@@ -490,15 +497,22 @@ class HyperVAPITestCase(test.TestCase):
None)
self._mox.VerifyAll()
- def test_destroy(self):
- self._instance_data = self._get_instance_data()
-
+ def _setup_destroy_mocks(self):
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
- m = vmutils.VMUtils.destroy_vm(mox.Func(self._check_instance_name),
- True)
- m.AndReturn([])
+ func = mox.Func(self._check_instance_name)
+ vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
+
+ m = vmutils.VMUtils.get_vm_storage_paths(func)
+ m.AndReturn(([], []))
+
+ vmutils.VMUtils.destroy_vm(func)
+
+ def test_destroy(self):
+ self._instance_data = self._get_instance_data()
+
+ self._setup_destroy_mocks()
self._mox.ReplayAll()
self._conn.destroy(self._instance_data)
@@ -562,7 +576,9 @@ class HyperVAPITestCase(test.TestCase):
if cow:
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
None)
- m.AndReturn([])
+ m.AndReturn(False)
+
+ vhdutils.VHDUtils.get_vhd_info(mox.Func(self._check_img_path))
self._mox.ReplayAll()
self._conn.pre_live_migration(self._context, instance_data,
@@ -617,7 +633,7 @@ class HyperVAPITestCase(test.TestCase):
def copy_dest_disk_path(src, dest):
self._fake_dest_disk_path = dest
- m = shutil.copyfile(mox.IsA(str), mox.IsA(str))
+ m = fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
m.WithSideEffects(copy_dest_disk_path)
self._fake_dest_base_disk_path = None
@@ -625,7 +641,7 @@ class HyperVAPITestCase(test.TestCase):
def copy_dest_base_disk_path(src, dest):
self._fake_dest_base_disk_path = dest
- m = shutil.copyfile(fake_parent_vhd_path, mox.IsA(str))
+ m = fake.PathUtils.copyfile(fake_parent_vhd_path, mox.IsA(str))
m.WithSideEffects(copy_dest_base_disk_path)
def check_dest_disk_path(path):
@@ -647,7 +663,7 @@ class HyperVAPITestCase(test.TestCase):
func = mox.Func(check_snapshot_path)
vmutils.VMUtils.remove_vm_snapshot(func)
- shutil.rmtree(mox.IsA(str))
+ fake.PathUtils.rmtree(mox.IsA(str))
m = fake.PathUtils.open(func2, 'rb')
m.AndReturn(io.BytesIO(b'fake content'))
@@ -702,65 +718,70 @@ class HyperVAPITestCase(test.TestCase):
mounted_disk_path):
self._instance_volume_disks.append(mounted_disk_path)
- def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
- with_exception=False,
- block_device_info=None):
- self._test_vm_name = None
-
- def set_vm_name(vm_name):
- self._test_vm_name = vm_name
-
- def check_vm_name(vm_name):
- return vm_name == self._test_vm_name
-
- m = vmutils.VMUtils.vm_exists(mox.IsA(str))
- m.WithSideEffects(set_vm_name).AndReturn(False)
-
- if not block_device_info:
- m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
- None)
- m.AndReturn([])
- else:
- m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
- mox.IsA(str), block_device_info)
- m.AndReturn(True)
-
- if cow:
- def check_path(parent_path):
- return parent_path == self._fetched_image
-
- vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
- mox.Func(check_path))
+ def _check_img_path(self, image_path):
+ return image_path == self._fetched_image
- vmutils.VMUtils.create_vm(mox.Func(check_vm_name), mox.IsA(int),
+ def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
+ boot_from_volume=False):
+ vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
mox.IsA(int), mox.IsA(bool))
- if not block_device_info:
- m = vmutils.VMUtils.attach_ide_drive(mox.Func(check_vm_name),
+ if not boot_from_volume:
+ m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk).InAnyOrder()
- m = vmutils.VMUtils.create_scsi_controller(mox.Func(check_vm_name))
+ func = mox.Func(self._check_vm_name)
+ m = vmutils.VMUtils.create_scsi_controller(func)
m.InAnyOrder()
- vmutils.VMUtils.create_nic(mox.Func(check_vm_name), mox.IsA(str),
+ vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name), mox.IsA(str),
mox.IsA(str)).InAnyOrder()
if setup_vif_mocks_func:
setup_vif_mocks_func()
+ def _set_vm_name(self, vm_name):
+ self._test_vm_name = vm_name
+
+ def _check_vm_name(self, vm_name):
+ return vm_name == self._test_vm_name
+
+ def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
+ with_exception=False,
+ block_device_info=None,
+ boot_from_volume=False):
+ m = vmutils.VMUtils.vm_exists(mox.IsA(str))
+ m.WithSideEffects(self._set_vm_name).AndReturn(False)
+
+ m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
+ mox.IsA(str), block_device_info)
+ m.AndReturn(boot_from_volume)
+
+ if not boot_from_volume:
+ vhdutils.VHDUtils.get_vhd_info(mox.Func(self._check_img_path))
+
+ if cow:
+ vhdutils.VHDUtils.create_differencing_vhd(
+ mox.IsA(str), mox.Func(self._check_img_path))
+ else:
+ fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
+
+ self._setup_create_instance_mocks(setup_vif_mocks_func,
+ boot_from_volume)
+
# TODO(alexpilotti) Based on where the exception is thrown
# some of the above mock calls need to be skipped
if with_exception:
- m = vmutils.VMUtils.vm_exists(mox.Func(check_vm_name))
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_vm_name))
m.AndReturn(True)
- vmutils.VMUtils.destroy_vm(mox.Func(check_vm_name), True)
+ vmutils.VMUtils.destroy_vm(mox.Func(self._check_vm_name))
else:
- vmutils.VMUtils.set_vm_state(mox.Func(check_vm_name),
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
constants.HYPERV_VM_STATE_ENABLED)
def _test_spawn_instance(self, cow=True,
@@ -772,14 +793,14 @@ class HyperVAPITestCase(test.TestCase):
with_exception)
self._mox.ReplayAll()
- self._spawn_instance(cow, )
+ self._spawn_instance(cow)
self._mox.VerifyAll()
self.assertEquals(len(self._instance_ide_disks), expected_ide_disks)
self.assertEquals(len(self._instance_ide_dvds), expected_ide_dvds)
- if not cow:
- self.assertEquals(self._fetched_image, self._instance_ide_disks[0])
+ vhd_path = pathutils.PathUtils().get_vhd_path(self._test_vm_name)
+ self.assertEquals(vhd_path, self._instance_ide_disks[0])
def test_attach_volume(self):
instance_data = self._get_instance_data()
@@ -897,10 +918,165 @@ class HyperVAPITestCase(test.TestCase):
m.WithSideEffects(self._add_volume_disk)
self._setup_spawn_instance_mocks(cow=False,
- block_device_info=block_device_info)
+ block_device_info=block_device_info,
+ boot_from_volume=True)
self._mox.ReplayAll()
self._spawn_instance(False, block_device_info)
self._mox.VerifyAll()
self.assertEquals(len(self._instance_volume_disks), 1)
+
+ def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
+ with_exception=False):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(
+ self.stubs, spectacular=True)
+
+ fake_local_ip = '10.0.0.1'
+ if same_host:
+ fake_dest_ip = fake_local_ip
+ else:
+ fake_dest_ip = '10.0.0.2'
+
+ fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
+ fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
+ instance['name'])
+
+ func = mox.Func(self._check_instance_name)
+ vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
+
+ m = vmutils.VMUtils.get_vm_storage_paths(func)
+ m.AndReturn(([fake_root_vhd_path], []))
+
+ m = hostutils.HostUtils.get_local_ips()
+ m.AndReturn([fake_local_ip])
+
+ m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
+ remove_dir=True)
+ m.AndReturn(fake_revert_path)
+
+ if same_host:
+ fake.PathUtils.makedirs(mox.IsA(str))
+
+ m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
+ if with_exception:
+ m.AndRaise(shutil.Error('Simulated copy error'))
+ else:
+ fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
+ if same_host:
+ fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
+
+ self._setup_destroy_mocks()
+
+ return (instance, fake_dest_ip, network_info)
+
+ def test_migrate_disk_and_power_off(self):
+ (instance,
+ fake_dest_ip,
+ network_info) = self._setup_test_migrate_disk_and_power_off_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.migrate_disk_and_power_off(self._context, instance,
+ fake_dest_ip, None,
+ network_info)
+ self._mox.VerifyAll()
+
+ def test_migrate_disk_and_power_off_same_host(self):
+ args = self._setup_test_migrate_disk_and_power_off_mocks(
+ same_host=True)
+ (instance, fake_dest_ip, network_info) = args
+
+ self._mox.ReplayAll()
+ self._conn.migrate_disk_and_power_off(self._context, instance,
+ fake_dest_ip, None,
+ network_info)
+ self._mox.VerifyAll()
+
+ def test_migrate_disk_and_power_off_exception(self):
+ args = self._setup_test_migrate_disk_and_power_off_mocks(
+ with_exception=True)
+ (instance, fake_dest_ip, network_info) = args
+
+ self._mox.ReplayAll()
+ self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
+ self._context, instance, fake_dest_ip, None,
+ network_info)
+ self._mox.VerifyAll()
+
+ def test_finish_migration(self):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(
+ self.stubs, spectacular=True)
+
+ m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
+ None)
+ m.AndReturn(False)
+
+ self._mox.StubOutWithMock(fake.PathUtils, 'exists')
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(True)
+
+ fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
+ instance["image_ref"]))
+
+ m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
+ m.AndReturn({'ParentPath': fake_parent_vhd_path,
+ 'MaxInternalSize': 1})
+
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(True)
+
+ vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))
+
+ self._set_vm_name(instance['name'])
+ self._setup_create_instance_mocks(None, False)
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ self._mox.ReplayAll()
+ self._conn.finish_migration(self._context, None, instance, "",
+ network_info, None, False, None)
+ self._mox.VerifyAll()
+
+ def test_confirm_migration(self):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(
+ self.stubs, spectacular=True)
+
+ pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
+ remove_dir=True)
+ self._mox.ReplayAll()
+ self._conn.confirm_migration(None, instance, network_info)
+ self._mox.VerifyAll()
+
+ def test_finish_revert_migration(self):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(
+ self.stubs, spectacular=True)
+
+ fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
+ instance['name'])
+
+ m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
+ None)
+ m.AndReturn(False)
+
+ m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
+ m.AndReturn(fake_revert_path)
+ fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
+
+ self._set_vm_name(instance['name'])
+ self._setup_create_instance_mocks(None, False)
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ self._mox.ReplayAll()
+ self._conn.finish_revert_migration(instance, network_info, None)
+ self._mox.VerifyAll()
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index 76fd1d5b6..87e51819d 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -241,8 +241,8 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
- cmd = ('dd', 'if=%s' % self.TEMPLATE_PATH,
- 'of=%s' % self.PATH, 'bs=4M')
+ cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
+ self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.mox.ReplayAll()
@@ -271,8 +271,8 @@ class LvmTestCase(_ImageTestCase, test.TestCase):
self.SIZE, sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
- cmd = ('dd', 'if=%s' % self.TEMPLATE_PATH,
- 'of=%s' % self.PATH, 'bs=4M')
+ cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
+ self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.disk.resize2fs(self.PATH, run_as_root=True)
self.mox.ReplayAll()
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 0829df2c6..6ae28a1c9 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -142,6 +142,17 @@ class InstanceTypeTestCase(test.TestCase):
self.assertRaises(exception.InvalidInput, instance_types.create,
name, 256, 1, 120, 100, flavorid)
+ def test_instance_type_create_with_long_flavor_name(self):
+ # A flavor name of 255 characters or fewer is valid.
+ name = 'a' * 255
+ inst_type = instance_types.create(name, 64, 1, 120, flavorid=11)
+ self.assertEqual(inst_type['name'], name)
+
+ # A flavor name longer than 255 characters raises an error.
+ name = 'a' * 256
+ self.assertRaises(exception.InvalidInput, instance_types.create,
+ name, 64, 1, 120, flavorid=11)
+
def test_add_instance_type_access(self):
user_id = 'fake'
project_id = 'fake'
diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py
index 95af25ebd..7e62d3a6e 100644
--- a/nova/tests/test_iptables_network.py
+++ b/nova/tests/test_iptables_network.py
@@ -30,32 +30,34 @@ class IptablesManagerTestCase(test.TestCase):
':INPUT ACCEPT [2223527:305688874]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [2172501:140856656]',
- ':nova-compute-FORWARD - [0:0]',
- ':nova-compute-INPUT - [0:0]',
- ':nova-compute-local - [0:0]',
- ':nova-compute-OUTPUT - [0:0]',
+ ':iptables-top-rule - [0:0]',
+ ':iptables-bottom-rule - [0:0]',
+ ':%s-FORWARD - [0:0]' % (binary_name),
+ ':%s-INPUT - [0:0]' % (binary_name),
+ ':%s-local - [0:0]' % (binary_name),
+ ':%s-OUTPUT - [0:0]' % (binary_name),
':nova-filter-top - [0:0]',
- '[0:0] -A FORWARD -j nova-filter-top ',
- '[0:0] -A OUTPUT -j nova-filter-top ',
- '[0:0] -A nova-filter-top -j nova-compute-local ',
- '[0:0] -A INPUT -j nova-compute-INPUT ',
- '[0:0] -A OUTPUT -j nova-compute-OUTPUT ',
- '[0:0] -A FORWARD -j nova-compute-FORWARD ',
+ '[0:0] -A FORWARD -j nova-filter-top',
+ '[0:0] -A OUTPUT -j nova-filter-top',
+ '[0:0] -A nova-filter-top -j %s-local' % (binary_name),
+ '[0:0] -A INPUT -j %s-INPUT' % (binary_name),
+ '[0:0] -A OUTPUT -j %s-OUTPUT' % (binary_name),
+ '[0:0] -A FORWARD -j %s-FORWARD' % (binary_name),
'[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 53 '
- '-j ACCEPT ',
+ '-j ACCEPT',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 53 '
- '-j ACCEPT ',
+ '-j ACCEPT',
'[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 67 '
- '-j ACCEPT ',
+ '-j ACCEPT',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 '
- '-j ACCEPT ',
+ '-j ACCEPT',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 '
- '-j ACCEPT ',
- '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '-j ACCEPT',
+ '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT',
'[0:0] -A FORWARD -o virbr0 -j REJECT --reject-with '
- 'icmp-port-unreachable ',
+ 'icmp-port-unreachable',
'[0:0] -A FORWARD -i virbr0 -j REJECT --reject-with '
- 'icmp-port-unreachable ',
+ 'icmp-port-unreachable',
'COMMIT',
'# Completed on Fri Feb 18 15:17:05 2011']
@@ -65,19 +67,20 @@ class IptablesManagerTestCase(test.TestCase):
':INPUT ACCEPT [2447:225266]',
':OUTPUT ACCEPT [63491:4191863]',
':POSTROUTING ACCEPT [63112:4108641]',
- ':nova-compute-OUTPUT - [0:0]',
- ':nova-compute-floating-ip-snat - [0:0]',
- ':nova-compute-SNATTING - [0:0]',
- ':nova-compute-PREROUTING - [0:0]',
- ':nova-compute-POSTROUTING - [0:0]',
+ ':%s-OUTPUT - [0:0]' % (binary_name),
+ ':%s-float-snat - [0:0]' % (binary_name),
+ ':%s-snat - [0:0]' % (binary_name),
+ ':%s-PREROUTING - [0:0]' % (binary_name),
+ ':%s-POSTROUTING - [0:0]' % (binary_name),
':nova-postrouting-bottom - [0:0]',
- '[0:0] -A PREROUTING -j nova-compute-PREROUTING ',
- '[0:0] -A OUTPUT -j nova-compute-OUTPUT ',
- '[0:0] -A POSTROUTING -j nova-compute-POSTROUTING ',
- '[0:0] -A POSTROUTING -j nova-postrouting-bottom ',
- '[0:0] -A nova-postrouting-bottom -j nova-compute-SNATTING ',
- '[0:0] -A nova-compute-SNATTING '
- '-j nova-compute-floating-ip-snat ',
+ '[0:0] -A PREROUTING -j %s-PREROUTING' % (binary_name),
+ '[0:0] -A OUTPUT -j %s-OUTPUT' % (binary_name),
+ '[0:0] -A POSTROUTING -j %s-POSTROUTING' % (binary_name),
+ '[0:0] -A POSTROUTING -j nova-postrouting-bottom',
+ '[0:0] -A nova-postrouting-bottom '
+ '-j %s-snat' % (binary_name),
+ '[0:0] -A %s-snat '
+ '-j %s-float-snat' % (binary_name, binary_name),
'COMMIT',
'# Completed on Fri Feb 18 15:17:05 2011']
@@ -105,13 +108,13 @@ class IptablesManagerTestCase(test.TestCase):
new_lines = self.manager._modify_rules(current_lines,
self.manager.ipv4['nat'])
- for line in [':nova-compute-OUTPUT - [0:0]',
- ':nova-compute-floating-ip-snat - [0:0]',
- ':nova-compute-SNATTING - [0:0]',
- ':nova-compute-PREROUTING - [0:0]',
- ':nova-compute-POSTROUTING - [0:0]']:
- self.assertTrue(line in new_lines, "One of nova-compute's chains "
- "went missing.")
+ for line in [':%s-OUTPUT - [0:0]' % (self.binary_name),
+ ':%s-float-snat - [0:0]' % (self.binary_name),
+ ':%s-snat - [0:0]' % (self.binary_name),
+ ':%s-PREROUTING - [0:0]' % (self.binary_name),
+ ':%s-POSTROUTING - [0:0]' % (self.binary_name)]:
+ self.assertTrue(line in new_lines, "One of our chains went"
+ " missing.")
seen_lines = set()
for line in new_lines:
@@ -140,12 +143,12 @@ class IptablesManagerTestCase(test.TestCase):
new_lines = self.manager._modify_rules(current_lines,
self.manager.ipv4['filter'])
- for line in [':nova-compute-FORWARD - [0:0]',
- ':nova-compute-INPUT - [0:0]',
- ':nova-compute-local - [0:0]',
- ':nova-compute-OUTPUT - [0:0]']:
- self.assertTrue(line in new_lines, "One of nova-compute's chains"
- " went missing.")
+ for line in [':%s-FORWARD - [0:0]' % (self.binary_name),
+ ':%s-INPUT - [0:0]' % (self.binary_name),
+ ':%s-local - [0:0]' % (self.binary_name),
+ ':%s-OUTPUT - [0:0]' % (self.binary_name)]:
+ self.assertTrue(line in new_lines, "One of our chains went"
+ " missing.")
seen_lines = set()
for line in new_lines:
@@ -189,3 +192,32 @@ class IptablesManagerTestCase(test.TestCase):
"COMMIT" == new_lines[-2] and
"#Completed by nova" == new_lines[-1],
"iptables rules not generated in the correct order")
+
+ def test_iptables_top_order(self):
+ # Test iptables_top_regex
+ current_lines = list(self.sample_filter)
+ current_lines[12:12] = ['[0:0] -A FORWARD -j iptables-top-rule']
+ self.flags(iptables_top_regex='-j iptables-top-rule')
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'])
+ self.assertEqual(current_lines, new_lines)
+
+ def test_iptables_bottom_order(self):
+ # Test iptables_bottom_regex
+ current_lines = list(self.sample_filter)
+ current_lines[26:26] = ['[0:0] -A FORWARD -j iptables-bottom-rule']
+ self.flags(iptables_bottom_regex='-j iptables-bottom-rule')
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'])
+ self.assertEqual(current_lines, new_lines)
+
+ def test_iptables_preserve_order(self):
+ # Test both iptables_top_regex and iptables_bottom_regex
+ current_lines = list(self.sample_filter)
+ current_lines[12:12] = ['[0:0] -A FORWARD -j iptables-top-rule']
+ current_lines[27:27] = ['[0:0] -A FORWARD -j iptables-bottom-rule']
+ self.flags(iptables_top_regex='-j iptables-top-rule')
+ self.flags(iptables_bottom_regex='-j iptables-bottom-rule')
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'])
+ self.assertEqual(current_lines, new_lines)
diff --git a/nova/tests/test_libvirt_blockinfo.py b/nova/tests/test_libvirt_blockinfo.py
index e8d548399..68f1fa394 100644
--- a/nova/tests/test_libvirt_blockinfo.py
+++ b/nova/tests/test_libvirt_blockinfo.py
@@ -421,7 +421,7 @@ class LibvirtBlockInfoTest(test.TestCase):
self.assertEqual(bus, 'usb')
image_meta = {'properties': {'disk_bus': 'xen'}}
- self.assertRaises(exception.NovaException,
+ self.assertRaises(exception.UnsupportedHardware,
blockinfo.get_disk_bus_for_device_type,
'kvm',
image_meta)
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index 916b961da..749fda33a 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -171,7 +171,7 @@ class LibvirtVifTestCase(test.TestCase):
self.stubs.Set(utils, 'execute', fake_execute)
- def _get_instance_xml(self, driver, net, mapping):
+ def _get_instance_xml(self, driver, net, mapping, image_meta=None):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
@@ -179,7 +179,7 @@ class LibvirtVifTestCase(test.TestCase):
conf.memory = 100 * 1024
conf.vcpus = 4
- nic = driver.get_config(self.instance, net, mapping)
+ nic = driver.get_config(self.instance, net, mapping, image_meta)
conf.add_device(nic)
return conf.to_xml()
@@ -269,6 +269,46 @@ class LibvirtVifTestCase(test.TestCase):
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
+ def test_model_kvm_custom(self):
+ self.flags(libvirt_use_virtio_for_bridges=True,
+ libvirt_type='kvm')
+
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
+ image_meta = {'properties': {'vif_model': 'e1000'}}
+ xml = self._get_instance_xml(d,
+ self.net_bridge,
+ self.mapping_bridge,
+ image_meta)
+
+ doc = etree.fromstring(xml)
+ ret = doc.findall('./devices/interface')
+ self.assertEqual(len(ret), 1)
+ node = ret[0]
+
+ model = node.find("model").get("type")
+ self.assertEqual(model, "e1000")
+ ret = node.findall("driver")
+ self.assertEqual(len(ret), 0)
+
+ def test_model_kvm_bogus(self):
+ self.flags(libvirt_use_virtio_for_bridges=True,
+ libvirt_type='kvm')
+
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
+ image_meta = {'properties': {'vif_model': 'acme'}}
+ self.assertRaises(exception.UnsupportedHardware,
+ self._get_instance_xml,
+ d,
+ self.net_bridge,
+ self.mapping_bridge,
+ image_meta)
+
def test_model_qemu(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='qemu')
diff --git a/nova/tests/test_libvirt_volume.py b/nova/tests/test_libvirt_volume.py
index b9f95735f..0098215b2 100644
--- a/nova/tests/test_libvirt_volume.py
+++ b/nova/tests/test_libvirt_volume.py
@@ -364,3 +364,59 @@ class LibvirtVolumeTestCase(test.TestCase):
('stat', export_mnt_base),
('mount', '-t', 'nfs', export_string, export_mnt_base)]
self.assertEqual(self.executes, expected_commands)
+
+ def aoe_connection(self, shelf, lun):
+ return {
+ 'driver_volume_type': 'aoe',
+ 'data': {
+ 'target_shelf': shelf,
+ 'target_lun': lun,
+ }
+ }
+
+ def test_libvirt_aoe_driver(self):
+ # NOTE(jbr_) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
+ shelf = '100'
+ lun = '1'
+ connection_info = self.aoe_connection(shelf, lun)
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ aoedevpath = '/dev/etherd/e%s.%s' % (shelf, lun)
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), aoedevpath)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_glusterfs_driver(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base)
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ export_string = '192.168.1.1:/volume-00001'
+ name = 'volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ libvirt_driver.get_hash_str(export_string))
+ file_path = os.path.join(export_mnt_base, name)
+
+ connection_info = {'data': {'export': export_string, 'name': name}}
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'file')
+ self.assertEqual(tree.find('./source').get('file'), file_path)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('stat', export_mnt_base),
+ ('mount', '-t', 'glusterfs', export_string, export_mnt_base)]
+ self.assertEqual(self.executes, expected_commands)
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 827bfb398..95399e33d 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -31,6 +31,7 @@ from nova.api.metadata import base
from nova.api.metadata import handler
from nova.api.metadata import password
from nova import block_device
+from nova.conductor import api as conductor_api
from nova import db
from nova.db.sqlalchemy import api
from nova import exception
@@ -118,6 +119,7 @@ class MetadataTestCase(test.TestCase):
def setUp(self):
super(MetadataTestCase, self).setUp()
self.instance = INSTANCES[0]
+ self.flags(use_local=True, group='conductor')
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
@@ -191,10 +193,11 @@ class MetadataTestCase(test.TestCase):
'swap': '/dev/sdc',
'ebs0': '/dev/sdh'}
- self.assertEqual(base._format_instance_mapping(ctxt, instance_ref0),
- block_device._DEFAULT_MAPPINGS)
- self.assertEqual(base._format_instance_mapping(ctxt, instance_ref1),
- expected)
+ capi = conductor_api.LocalAPI()
+ self.assertEqual(base._format_instance_mapping(capi, ctxt,
+ instance_ref0), block_device._DEFAULT_MAPPINGS)
+ self.assertEqual(base._format_instance_mapping(capi, ctxt,
+ instance_ref1), expected)
def test_pubkey(self):
md = fake_InstanceMetadata(self.stubs, copy.copy(self.instance))
@@ -247,6 +250,7 @@ class OpenStackMetadataTestCase(test.TestCase):
def setUp(self):
super(OpenStackMetadataTestCase, self).setUp()
self.instance = INSTANCES[0]
+ self.flags(use_local=True, group='conductor')
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
@@ -382,6 +386,7 @@ class MetadataHandlerTestCase(test.TestCase):
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
self.instance = INSTANCES[0]
+ self.flags(use_local=True, group='conductor')
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
@@ -547,6 +552,7 @@ class MetadataPasswordTestCase(test.TestCase):
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
spectacular=True)
self.instance = copy.copy(INSTANCES[0])
+ self.flags(use_local=True, group='conductor')
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
self.flags(use_local=True, group='conductor')
diff --git a/nova/tests/test_migration_utils.py b/nova/tests/test_migration_utils.py
new file mode 100644
index 000000000..45b6d86d4
--- /dev/null
+++ b/nova/tests/test_migration_utils.py
@@ -0,0 +1,126 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy import MetaData, Table, Column, Integer, BigInteger
+
+from nova.db.sqlalchemy import utils
+from nova import exception
+from nova.tests import test_migrations
+
+
+class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
+ """Class for testing utils that are used in db migrations."""
+
+ def test_utils_drop_unique_constraint(self):
+ table_name = "__test_tmp_table__"
+ uc_name = 'uniq_foo'
+ values = [
+ {'id': 1, 'a': 3, 'foo': 10},
+ {'id': 2, 'a': 2, 'foo': 20},
+ {'id': 3, 'a': 1, 'foo': 30}
+ ]
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ test_table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True,
+ nullable=False),
+ Column('a', Integer),
+ Column('foo', Integer),
+ UniqueConstraint('a', name='uniq_a'),
+ UniqueConstraint('foo', name=uc_name))
+ test_table.create()
+
+ engine.execute(test_table.insert(), values)
+ # NOTE(boris-42): This method is generic UC dropper.
+ utils.drop_unique_constraint(engine, table_name, uc_name, 'foo')
+
+ s = test_table.select().order_by(test_table.c.id)
+ rows = engine.execute(s).fetchall()
+
+ for i in xrange(0, len(values)):
+ v = values[i]
+ self.assertEqual((v['id'], v['a'], v['foo']), rows[i])
+
+ # NOTE(boris-42): Update data about Table from DB.
+ meta = MetaData()
+ meta.bind = engine
+ test_table = Table(table_name, meta, autoload=True)
+ constraints = filter(lambda c: c.name == uc_name,
+ test_table.constraints)
+ self.assertEqual(len(constraints), 0)
+ self.assertEqual(len(test_table.constraints), 1)
+
+ test_table.drop()
+
+ def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self):
+ table_name = "__test_tmp_table__"
+ uc_name = 'uniq_foo'
+ values = [
+ {'id': 1, 'a': 3, 'foo': 10},
+ {'id': 2, 'a': 2, 'foo': 20},
+ {'id': 3, 'a': 1, 'foo': 30}
+ ]
+
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ test_table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True,
+ nullable=False),
+ Column('a', Integer),
+ Column('foo', BigInteger, default=0),
+ UniqueConstraint('a', name='uniq_a'),
+ UniqueConstraint('foo', name=uc_name))
+ test_table.create()
+
+ engine.execute(test_table.insert(), values)
+ if key == "sqlite":
+ # NOTE(boris-42): Missing info about column `foo` that has
+ # unsupported type BigInteger.
+ self.assertRaises(exception.NovaException,
+ utils.drop_unique_constraint,
+ engine, table_name, uc_name, 'foo')
+
+ # NOTE(boris-42): Wrong type of foo instance. it should be
+ # instance of sqlalchemy.Column.
+ self.assertRaises(exception.NovaException,
+ utils.drop_unique_constraint,
+ engine, table_name, uc_name, 'foo',
+ foo=Integer())
+
+ foo = Column('foo', BigInteger, default=0)
+ utils.drop_unique_constraint(engine, table_name, uc_name, 'foo',
+ foo=foo)
+
+ s = test_table.select().order_by(test_table.c.id)
+ rows = engine.execute(s).fetchall()
+
+ for i in xrange(0, len(values)):
+ v = values[i]
+ self.assertEqual((v['id'], v['a'], v['foo']), rows[i])
+
+ # NOTE(boris-42): Update data about Table from DB.
+ meta = MetaData()
+ meta.bind = engine
+ test_table = Table(table_name, meta, autoload=True)
+ constraints = filter(lambda c: c.name == uc_name,
+ test_table.constraints)
+ self.assertEqual(len(constraints), 0)
+ self.assertEqual(len(test_table.constraints), 1)
+ test_table.drop()
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index 02d7462d2..bb1086a9d 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -43,6 +43,7 @@ postgres=# create database openstack_citest with owner openstack_citest;
import collections
import commands
import ConfigParser
+import datetime
import os
import urlparse
@@ -53,6 +54,7 @@ import nova.db.migration as migration
import nova.db.sqlalchemy.migrate_repo
from nova.db.sqlalchemy.migration import versioning_api as migration_api
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
from nova import test
@@ -116,8 +118,8 @@ def get_table(engine, name):
return sqlalchemy.Table(name, metadata, autoload=True)
-class TestMigrations(test.TestCase):
- """Test sqlalchemy-migrate migrations."""
+class BaseMigrationTestCase(test.TestCase):
+    """Base class for testing migrations and migration utils."""
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
@@ -130,18 +132,18 @@ class TestMigrations(test.TestCase):
os.path.abspath(os.path.dirname(MIGRATE_FILE)))
def setUp(self):
- super(TestMigrations, self).setUp()
+ super(BaseMigrationTestCase, self).setUp()
self.snake_walk = False
self.test_databases = {}
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
- LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
- if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
+ LOG.debug('config_path is %s' % BaseMigrationTestCase.CONFIG_FILE_PATH)
+ if os.path.exists(BaseMigrationTestCase.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
- cp.read(TestMigrations.CONFIG_FILE_PATH)
+ cp.read(BaseMigrationTestCase.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
@@ -165,7 +167,7 @@ class TestMigrations(test.TestCase):
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
- super(TestMigrations, self).tearDown()
+ super(BaseMigrationTestCase, self).tearDown()
def _reset_databases(self):
def execute_cmd(cmd=None):
@@ -232,6 +234,10 @@ class TestMigrations(test.TestCase):
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
+
+class TestMigrations(BaseMigrationTestCase):
+ """Test sqlalchemy-migrate migrations."""
+
def test_walk_versions(self):
"""
Walks all version scripts for each tested database, ensuring
@@ -381,18 +387,70 @@ class TestMigrations(test.TestCase):
(version, engine))
raise
+ def _prerun_134(self, engine):
+ now = timeutils.utcnow()
+ data = [{
+ 'id': 1,
+ 'uuid': '1d739808-d7ec-4944-b252-f8363e119755',
+ 'mac': '00:00:00:00:00:01',
+ 'start_period': now,
+ 'last_refreshed': now + datetime.timedelta(seconds=10),
+ 'bw_in': 100000,
+ 'bw_out': 200000,
+ }, {
+ 'id': 2,
+ 'uuid': '1d739808-d7ec-4944-b252-f8363e119756',
+ 'mac': '2a:f2:48:31:c1:60',
+ 'start_period': now,
+ 'last_refreshed': now + datetime.timedelta(seconds=20),
+ 'bw_in': 1000000000,
+ 'bw_out': 200000000,
+ }, {
+ 'id': 3,
+ # This is intended to be the same as above.
+ 'uuid': '1d739808-d7ec-4944-b252-f8363e119756',
+ 'mac': '00:00:00:00:00:02',
+ 'start_period': now,
+ 'last_refreshed': now + datetime.timedelta(seconds=30),
+ 'bw_in': 0,
+ 'bw_out': 0,
+ }]
+
+ bw_usage_cache = get_table(engine, 'bw_usage_cache')
+ engine.execute(bw_usage_cache.insert(), data)
+ return data
+
+ def _check_134(self, engine, data):
+ bw_usage_cache = get_table(engine, 'bw_usage_cache')
+
+        # Checks if both columns have been successfully created.
+ self.assertIn('last_ctr_in', bw_usage_cache.c)
+ self.assertIn('last_ctr_out', bw_usage_cache.c)
+
+ # Checks if all rows have been inserted.
+ bw_items = bw_usage_cache.select().execute().fetchall()
+ self.assertEqual(len(bw_items), 3)
+
+ bw = bw_usage_cache.select(
+ bw_usage_cache.c.id == 1).execute().first()
+
+ # New columns have 'NULL' as default value.
+ self.assertEqual(bw['last_ctr_in'], None)
+ self.assertEqual(bw['last_ctr_out'], None)
+
+ self.assertEqual(data[0]['mac'], bw['mac'])
+
# migration 146, availability zone transition
def _prerun_146(self, engine):
data = {
'availability_zone': 'custom_az',
- 'aggregate_name': 1,
'name': 'name',
}
aggregates = get_table(engine, 'aggregates')
result = aggregates.insert().values(data).execute()
# NOTE(sdague) it's important you don't insert keys by value in
- # postgresql, because it's autoincrement counter won't get updated
+ # postgresql, because its autoincrement counter won't get updated
data['id'] = result.inserted_primary_key[0]
return data
@@ -573,3 +631,49 @@ class TestMigrations(test.TestCase):
self.assertIn(prop_name, inst_sys_meta)
self.assertEqual(str(inst_sys_meta[prop_name]),
str(inst_type[prop]))
+
+ # migration 154, add shadow tables for deleted data
+ # There are 53 shadow tables but we only test one
+ # There are additional tests in test_db_api.py
+ def _prerun_154(self, engine):
+ meta = sqlalchemy.schema.MetaData()
+ meta.reflect(engine)
+ table_names = meta.tables.keys()
+ for table_name in table_names:
+ self.assertFalse(table_name.startswith("_shadow"))
+
+ def _check_154(self, engine, data):
+ meta = sqlalchemy.schema.MetaData()
+ meta.reflect(engine)
+ table_names = set(meta.tables.keys())
+ for table_name in table_names:
+ print table_name
+ if table_name.startswith("shadow_"):
+ shadow_name = table_name
+ base_name = table_name.replace("shadow_", "")
+ self.assertIn(base_name, table_names)
+ else:
+ base_name = table_name
+ shadow_name = "shadow_" + table_name
+ self.assertIn(shadow_name, table_names)
+ shadow_table = get_table(engine, shadow_name)
+ base_table = get_table(engine, base_name)
+ base_columns = []
+ shadow_columns = []
+ for column in base_table.columns:
+ base_columns.append(column)
+ for column in shadow_table.columns:
+ shadow_columns.append(column)
+ for ii, base_column in enumerate(base_columns):
+ shadow_column = shadow_columns[ii]
+ self.assertEqual(base_column.name, shadow_column.name)
+ # NullType needs a special case. We end up with NullType on sqlite
+ # where bigint is not defined.
+ if isinstance(base_column.type, sqlalchemy.types.NullType):
+ self.assertTrue(isinstance(shadow_column.type,
+ sqlalchemy.types.NullType))
+ else:
+ # Identical types do not test equal because sqlalchemy does not
+ # override __eq__, but if we stringify them then they do.
+ self.assertEqual(str(base_column.type),
+ str(shadow_column.type))
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index aaa826a70..0aa2a310c 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -25,6 +25,7 @@ import StringIO
import tempfile
import mox
+import netaddr
import nova
from nova import exception
@@ -481,6 +482,29 @@ class GenericUtilsTestCase(test.TestCase):
self.assertFalse(utils.is_valid_ipv4('::1'))
self.assertFalse(utils.is_valid_ipv4('bacon'))
+ def test_is_valid_ipv6(self):
+ self.assertTrue(utils.is_valid_ipv6("::1"))
+ self.assertTrue(utils.is_valid_ipv6(
+ "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
+ self.assertTrue(utils.is_valid_ipv6(
+ "0000:0000:0000:0000:0000:0000:0000:0001"))
+ self.assertFalse(utils.is_valid_ipv6("foo"))
+ self.assertFalse(utils.is_valid_ipv6("127.0.0.1"))
+
+ def test_get_shortened_ipv6(self):
+ self.assertEquals("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
+ utils.get_shortened_ipv6(
+ "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
+ self.assertEquals("::1", utils.get_shortened_ipv6(
+ "0000:0000:0000:0000:0000:0000:0000:0001"))
+ self.assertEquals("caca::caca:0:babe:201:102",
+ utils.get_shortened_ipv6(
+ "caca:0000:0000:caca:0000:babe:0201:0102"))
+ self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
+ "127.0.0.1")
+ self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
+ "failure")
+
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
@@ -964,3 +988,18 @@ class GetCallArgsTestCase(test.TestCase):
self.assertEqual(3, callargs['red'])
self.assertTrue('blue' in callargs)
self.assertEqual(None, callargs['blue'])
+
+
+class StringLengthTestCase(test.TestCase):
+ def test_check_string_length(self):
+ self.assertIsNone(utils.check_string_length(
+ 'test', 'name', max_length=255))
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ 11, 'name', max_length=255)
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ '', 'name', min_length=1)
+ self.assertRaises(exception.InvalidInput,
+ utils.check_string_length,
+ 'a' * 256, 'name', max_length=255)
diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py
index 5568ff0de..780e5ae12 100644
--- a/nova/tests/test_versions.py
+++ b/nova/tests/test_versions.py
@@ -24,38 +24,14 @@ from nova import version
class VersionTestCase(test.TestCase):
"""Test cases for Versions code."""
- def setUp(self):
- """setup test with unchanging values."""
- super(VersionTestCase, self).setUp()
- self.version = version
- self.version.FINAL = False
- self.version.NOVA_VERSION = ['2012', '10']
- self.version.YEAR, self.version.COUNT = self.version.NOVA_VERSION
- self.version.version_info = {'branch_nick': u'LOCALBRANCH',
- 'revision_id': 'LOCALREVISION',
- 'revno': 0}
- self.version.NOVA_PACKAGE = "g9ec3421"
-
- def test_version_string_is_good(self):
- # Ensure version string works.
- self.assertEqual("2012.10-dev", self.version.version_string())
-
- def test_canonical_version_string_is_good(self):
- # Ensure canonical version works.
- self.assertEqual("2012.10", self.version.canonical_version_string())
-
- def test_final_version_strings_are_identical(self):
- # Ensure final version strings match only at release.
- self.assertNotEqual(self.version.canonical_version_string(),
- self.version.version_string())
- self.version.FINAL = True
- self.assertEqual(self.version.canonical_version_string(),
- self.version.version_string())
def test_version_string_with_package_is_good(self):
- # Ensure uninstalled code get version string.
- self.assertEqual("2012.10-g9ec3421",
- self.version.version_string_with_package())
+        """Ensure uninstalled code gets the version string."""
+
+ self.stubs.Set(version.version_info, 'version', '5.5.5.5')
+ self.stubs.Set(version, 'NOVA_PACKAGE', 'g9ec3421')
+ self.assertEqual("5.5.5.5-g9ec3421",
+ version.version_string_with_package())
def test_release_file(self):
version.loaded = False
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 22544fd2d..e20a5a7b7 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -28,6 +28,7 @@ from nova import exception
from nova import test
import nova.tests.image.fake
from nova.tests import matchers
+from nova.tests import utils
from nova.tests.vmwareapi import db_fakes
from nova.tests.vmwareapi import stubs
from nova.virt.vmwareapi import driver
@@ -54,24 +55,7 @@ class VMwareAPIVMTestCase(test.TestCase):
self.conn = driver.VMwareESXDriver(None, False)
# NOTE(vish): none of the network plugging code is actually
# being tested
- self.network_info = [({'bridge': 'fa0',
- 'id': 0,
- 'vlan': None,
- 'bridge_interface': None,
- 'injected': True},
- {'broadcast': '192.168.0.255',
- 'dns': ['192.168.0.1'],
- 'gateway': '192.168.0.1',
- 'gateway_v6': 'dead:beef::1',
- 'ip6s': [{'enabled': '1',
- 'ip': 'dead:beef::dcad:beff:feef:0',
- 'netmask': '64'}],
- 'ips': [{'enabled': '1',
- 'ip': '192.168.0.100',
- 'netmask': '255.255.255.0'}],
- 'label': 'fake',
- 'mac': 'DE:AD:BE:EF:00:00',
- 'rxtx_cap': 3})]
+ self.network_info = utils.get_test_network_info(legacy_model=False)
self.image = {
'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c',
diff --git a/nova/tests/test_vmwareapi_vmops.py b/nova/tests/test_vmwareapi_vmops.py
new file mode 100644
index 000000000..ad83cd21d
--- /dev/null
+++ b/nova/tests/test_vmwareapi_vmops.py
@@ -0,0 +1,62 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network import model as network_model
+from nova import test
+from nova.virt.vmwareapi import vmops
+
+
+class VMwareVMOpsTestCase(test.TestCase):
+ def setUp(self):
+ super(VMwareVMOpsTestCase, self).setUp()
+ subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
+ dns=[network_model.IP('192.168.0.1')],
+ gateway=
+ network_model.IP('192.168.0.1'),
+ ips=[
+ network_model.IP('192.168.0.100')],
+ routes=None)
+ subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
+ dns=None,
+ gateway=
+ network_model.IP('dead:beef::1'),
+ ips=[network_model.IP(
+ 'dead:beef::dcad:beff:feef:0')],
+ routes=None)
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ subnets=[subnet_4, subnet_6],
+ vlan=None,
+ bridge_interface=None,
+ injected=True)
+ self.network_info = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])
+
+ def test_get_machine_id_str(self):
+ result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
+ print result
+ self.assertEqual(result,
+ 'DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
+ '192.168.0.1;192.168.0.255;192.168.0.1#')
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index cc71ba31e..2a484d832 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -2677,9 +2677,10 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
'destination_sr_ref': None,
'migrate_send_data': None
}}
- self.conn.check_can_live_migrate_source(self.context,
- {'host': 'host'},
- dest_check_data)
+ result = self.conn.check_can_live_migrate_source(self.context,
+ {'host': 'host'},
+ dest_check_data)
+ self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index a5d4b4712..56be3ab71 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -89,7 +89,7 @@ def get_test_instance(context=None, instance_type=None):
return instance_ref
-def get_test_network_info(count=1):
+def get_test_network_info(count=1, legacy_model=True):
ipv6 = CONF.use_ipv6
fake = 'fake'
fake_ip = '0.0.0.0/0'
@@ -98,26 +98,69 @@ def get_test_network_info(count=1):
fake_netmask = '255.255.255.255'
fake_vlan = 100
fake_bridge_interface = 'eth0'
- network = {'bridge': fake,
- 'cidr': fake_ip,
- 'cidr_v6': fake_ip,
- 'vlan': fake_vlan,
- 'bridge_interface': fake_bridge_interface,
- 'injected': False}
- mapping = {'mac': fake,
- 'vif_type': network_model.VIF_TYPE_BRIDGE,
- 'vif_uuid': 'vif-xxx-yyy-zzz',
- 'dhcp_server': fake,
- 'dns': ['fake1', 'fake2'],
- 'gateway': fake,
- 'gateway_v6': fake,
- 'ips': [{'ip': fake_ip, 'netmask': fake_netmask},
- {'ip': fake_ip, 'netmask': fake_netmask}]}
- if ipv6:
- mapping['ip6s'] = [{'ip': fake_ip, 'netmask': fake_netmask},
- {'ip': fake_ip_2},
- {'ip': fake_ip_3}]
- return [(network, mapping) for x in xrange(0, count)]
+
+ def legacy():
+ network = {'bridge': fake,
+ 'cidr': fake_ip,
+ 'cidr_v6': fake_ip,
+ 'vlan': fake_vlan,
+ 'bridge_interface': fake_bridge_interface,
+ 'injected': False}
+ mapping = {'mac': fake,
+ 'vif_type': network_model.VIF_TYPE_BRIDGE,
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'dhcp_server': fake,
+ 'dns': ['fake1', 'fake2'],
+ 'gateway': fake,
+ 'gateway_v6': fake,
+ 'ips': [{'ip': fake_ip, 'netmask': fake_netmask},
+ {'ip': fake_ip, 'netmask': fake_netmask}]}
+ if ipv6:
+ mapping['ip6s'] = [{'ip': fake_ip, 'netmask': fake_netmask},
+ {'ip': fake_ip_2},
+ {'ip': fake_ip_3}]
+ return network, mapping
+
+ def current():
+ fake_ip = '0.0.0.0'
+ subnet_4 = network_model.Subnet(cidr=fake_ip,
+ dns=[network_model.IP(fake_ip),
+ network_model.IP(fake_ip)],
+ gateway=network_model.IP(fake_ip),
+ ips=[network_model.IP(fake_ip),
+ network_model.IP(fake_ip)],
+ routes=None,
+ dhcp_server=network_model.IP(fake_ip))
+ subnet_6 = network_model.Subnet(cidr=fake_ip,
+ gateway=network_model.IP(fake_ip),
+ ips=[network_model.IP(fake_ip),
+ network_model.IP(fake_ip),
+ network_model.IP(fake_ip)],
+ routes=None,
+ version=6)
+ subnets = [subnet_4]
+ if ipv6:
+ subnets.append(subnet_6)
+ network = network_model.Network(id=None,
+ bridge=fake,
+ label=None,
+ subnets=subnets,
+ vlan=fake_vlan,
+ bridge_interface=fake_bridge_interface,
+ injected=False)
+ vif = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address=fake,
+ network=network,
+ type=network_model.VIF_TYPE_BRIDGE,
+ devname=None,
+ ovs_interfaceid=None)
+
+ return vif
+
+ if legacy_model:
+ return [legacy() for x in xrange(0, count)]
+ else:
+ return network_model.NetworkInfo([current() for x in xrange(0, count)])
def is_osx():
diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py
index 844ae8459..3497babf2 100644
--- a/nova/tests/virt/xenapi/test_volumeops.py
+++ b/nova/tests/virt/xenapi/test_volumeops.py
@@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import collections
+
from nova import test
from nova.tests.xenapi import stubs
from nova.virt.xenapi import volumeops
@@ -125,31 +127,33 @@ class VolumeAttachTestCase(test.TestCase):
vm_ref = 'vm_ref'
dev_number = 1
- called = {'xenapi': False}
+ called = collections.defaultdict(bool)
- def fake_call_xenapi(self, *args, **kwargs):
- # Only used for VBD.plug in this code path.
- called['xenapi'] = True
- raise Exception()
+ def fake_call_xenapi(self, method, *args, **kwargs):
+ called[method] = True
self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
- self.mox.StubOutWithMock(
- volumeops.volume_utils, 'introduce_sr_unless_present')
- self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
- self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
-
volumeops.volume_utils.parse_sr_info(
connection_data, sr_label).AndReturn(
tuple([sr_uuid, sr_label, sr_params]))
- volumeops.volume_utils.introduce_sr_unless_present(
+ self.mox.StubOutWithMock(
+ volumeops.volume_utils, 'find_sr_by_uuid')
+ volumeops.volume_utils.find_sr_by_uuid(session, sr_uuid).AndReturn(
+ None)
+
+ self.mox.StubOutWithMock(
+ volumeops.volume_utils, 'introduce_sr')
+ volumeops.volume_utils.introduce_sr(
session, sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
volumeops.volume_utils.introduce_vdi(
- session, sr_ref, vdi_uuid, None).AndReturn(vdi_ref)
+ session, sr_ref, vdi_uuid=vdi_uuid).AndReturn(vdi_ref)
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
volumeops.vm_utils.create_vbd(
session, vm_ref, vdi_ref, dev_number,
bootable=False, osvol=True).AndReturn(vbd_ref)
@@ -157,6 +161,6 @@ class VolumeAttachTestCase(test.TestCase):
self.mox.ReplayAll()
ops._connect_volume(connection_data, dev_number, instance_name,
- vm_ref, hotplug=False)
+ vm_ref, hotplug=False)
- self.assertEquals(False, called['xenapi'])
+ self.assertEquals(False, called['VBD.plug'])
diff --git a/nova/utils.py b/nova/utils.py
index 7ad810504..699544daa 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -892,6 +892,15 @@ def is_valid_ipv4(address):
return False
+def is_valid_ipv6(address):
+ return netaddr.valid_ipv6(address)
+
+
+def get_shortened_ipv6(address):
+ addr = netaddr.IPAddress(address, version=6)
+ return str(addr.ipv6())
+
+
def is_valid_cidr(address):
"""Check if the provided ipv4 or ipv6 address is a valid
CIDR address or not"""
@@ -1341,3 +1350,25 @@ class ExceptionHelper(object):
except rpc_common.ClientException, e:
raise (e._exc_info[1], None, e._exc_info[2])
return wrapper
+
+
+def check_string_length(value, name, min_length=0, max_length=None):
+ """Check the length of specified string
+ :param value: the value of the string
+ :param name: the name of the string
+ :param min_length: the min_length of the string
+ :param max_length: the max_length of the string
+ """
+ if not isinstance(value, basestring):
+ msg = _("%s is not a string or unicode") % name
+ raise exception.InvalidInput(message=msg)
+
+ if len(value) < min_length:
+ msg = _("%(name)s has less than %(min_length)s "
+ "characters.") % locals()
+ raise exception.InvalidInput(message=msg)
+
+ if max_length and len(value) > max_length:
+ msg = _("%(name)s has more than %(max_length)s "
+ "characters.") % locals()
+ raise exception.InvalidInput(message=msg)
diff --git a/nova/version.py b/nova/version.py
index 82f3bb970..722a20e16 100644
--- a/nova/version.py
+++ b/nova/version.py
@@ -14,14 +14,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova.openstack.common import version as common_version
+
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
NOVA_PACKAGE = None # OS distro package version suffix
-NOVA_VERSION = ['2013', '1', None]
-YEAR, COUNT, REVISION = NOVA_VERSION
-FINAL = False # This becomes true at Release Candidate time
loaded = False
+version_info = common_version.VersionInfo('nova')
+version_string = version_info.version_string
def _load_config():
@@ -81,19 +82,8 @@ def package_string():
return NOVA_PACKAGE
-def canonical_version_string():
- return '.'.join(filter(None, NOVA_VERSION))
-
-
-def version_string():
- if FINAL:
- return canonical_version_string()
- else:
- return '%s-dev' % (canonical_version_string(),)
-
-
def version_string_with_package():
if package_string() is None:
- return canonical_version_string()
+ return version_info.version_string()
else:
- return "%s-%s" % (canonical_version_string(), package_string())
+ return "%s-%s" % (version_info.version_string(), package_string())
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 9a8a4fed0..14bdab2b0 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -309,6 +309,9 @@ class PXE(base.NodeDriver):
if injected_files is None:
injected_files = []
+ else:
+            # NOTE(deva): copy so we don't modify the original
+ injected_files = list(injected_files)
net_config = build_network_config(network_info)
diff --git a/nova/virt/baremetal/virtual_power_driver.py b/nova/virt/baremetal/virtual_power_driver.py
index 22dd3a954..f77579dba 100644
--- a/nova/virt/baremetal/virtual_power_driver.py
+++ b/nova/virt/baremetal/virtual_power_driver.py
@@ -212,8 +212,12 @@ class VirtualPowerManager(base.PowerManager):
cmd = '%s %s' % (self._vp_cmd.base_cmd, cmd)
- stdout, stderr = utils.ssh_execute(self._connection, cmd,
+ try:
+ stdout, stderr = utils.ssh_execute(self._connection, cmd,
check_exit_code=check_exit_code)
- result = stdout.strip().splitlines()
- LOG.debug('Result for run_command: %s' % result)
+ result = stdout.strip().splitlines()
+ LOG.debug('Result for run_command: %s' % result)
+ except exception.ProcessExecutionError:
+ result = []
+ LOG.exception("Error running command: %s" % cmd)
return result
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index 34b15ea53..5b515a0f1 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -43,6 +43,7 @@ class BaseVolumeUtils(object):
def __init__(self):
if sys.platform == 'win32':
self._conn_wmi = wmi.WMI(moniker='//./root/wmi')
+ self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
@abc.abstractmethod
def login_storage_target(self, target_lun, target_iqn, target_portal):
@@ -56,10 +57,10 @@ class BaseVolumeUtils(object):
def execute_log_out(self, session_id):
pass
- def get_iscsi_initiator(self, cim_conn):
+ def get_iscsi_initiator(self):
"""Get iscsi initiator name for this machine."""
- computer_system = cim_conn.Win32_ComputerSystem()[0]
+ computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
hostname = computer_system.name
keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\"
"iSCSI\\Discovery")
@@ -72,7 +73,7 @@ class BaseVolumeUtils(object):
except Exception:
LOG.info(_("The ISCSI initiator name can't be found. "
"Choosing the default one"))
- computer_system = cim_conn.Win32_ComputerSystem()[0]
+ computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
return {
'ip': CONF.my_ip,
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 4af3b8b05..aac47deef 100755
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -23,6 +23,7 @@ from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
+from nova.virt.hyperv import migrationops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
@@ -36,10 +37,10 @@ class HyperVDriver(driver.ComputeDriver):
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
- self._vmops = vmops.VMOps(self._volumeops)
+ self._vmops = vmops.VMOps()
self._snapshotops = snapshotops.SnapshotOps()
- self._livemigrationops = livemigrationops.LiveMigrationOps(
- self._volumeops)
+ self._livemigrationops = livemigrationops.LiveMigrationOps()
+ self._migrationops = migrationops.MigrationOps()
def init_host(self, host):
pass
@@ -146,7 +147,7 @@ class HyperVDriver(driver.ComputeDriver):
LOG.debug(_("plug_vifs called"), instance=instance)
def unplug_vifs(self, instance, network_info):
- LOG.debug(_("plug_vifs called"), instance=instance)
+ LOG.debug(_("unplug_vifs called"), instance=instance)
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
LOG.debug(_("ensure_filtering_rules_for_instance called"),
@@ -155,17 +156,33 @@ class HyperVDriver(driver.ComputeDriver):
def unfilter_instance(self, instance, network_info):
LOG.debug(_("unfilter_instance called"), instance=instance)
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type, network_info,
+ block_device_info=None):
+ return self._migrationops.migrate_disk_and_power_off(context,
+ instance, dest,
+ instance_type,
+ network_info,
+ block_device_info)
+
def confirm_migration(self, migration, instance, network_info):
- LOG.debug(_("confirm_migration called"), instance=instance)
+ self._migrationops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- LOG.debug(_("finish_revert_migration called"), instance=instance)
+ self._migrationops.finish_revert_migration(instance, network_info,
+ block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- LOG.debug(_("finish_migration called"), instance=instance)
+ self._migrationops.finish_migration(context, migration, instance,
+ disk_info, network_info,
+ image_meta, resize_instance,
+ block_device_info)
+
+ def get_host_ip_addr(self):
+ return self._hostops.get_host_ip_addr()
def get_console_output(self, instance):
LOG.debug(_("get_console_output called"), instance=instance)
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index 5a22b60de..b3b38aab9 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -21,12 +21,15 @@ Management class for host operations.
import os
import platform
+from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import pathutils
+CONF = cfg.CONF
+CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
@@ -73,7 +76,7 @@ class HostOps(object):
return (total_mem_mb, free_mem_mb, total_mem_mb - free_mem_mb)
def _get_local_hdd_info_gb(self):
- (drive, _) = os.path.splitdrive(self._pathutils.get_instances_path())
+ drive = os.path.splitdrive(self._pathutils.get_instances_dir())[0]
(size, free_space) = self._hostutils.get_volume_info(drive)
total_gb = size / (1024 ** 3)
@@ -152,7 +155,7 @@ class HostOps(object):
def get_host_stats(self, refresh=False):
"""Return the current state of the host. If 'refresh' is
True, run the update first."""
- LOG.info(_("get_host_stats called"))
+ LOG.debug(_("get_host_stats called"))
if refresh or not self._stats:
self._update_stats()
@@ -161,3 +164,11 @@ class HostOps(object):
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
pass
+
+ def get_host_ip_addr(self):
+ host_ip = CONF.my_ip
+ if not host_ip:
+ # Return the first available address
+ host_ip = self._hostutils.get_local_ips()[0]
+ LOG.debug(_("Host IP address is: %s"), host_ip)
+ return host_ip
diff --git a/nova/virt/hyperv/hostutils.py b/nova/virt/hyperv/hostutils.py
index 71f3bc5b2..d28ce75a5 100644
--- a/nova/virt/hyperv/hostutils.py
+++ b/nova/virt/hyperv/hostutils.py
@@ -16,6 +16,7 @@
# under the License.
import ctypes
+import socket
import sys
if sys.platform == 'win32':
@@ -72,3 +73,9 @@ class HostUtils(object):
def get_windows_version(self):
return self._conn_cimv2.Win32_OperatingSystem()[0].Version
+
+ def get_local_ips(self):
+ addr_info = socket.getaddrinfo(socket.gethostname(), None, 0, 0, 0)
+ # Returns IPv4 and IPv6 addresses, ordered by protocol family
+ addr_info.sort()
+ return [a[4][0] for a in addr_info]
diff --git a/nova/virt/hyperv/imagecache.py b/nova/virt/hyperv/imagecache.py
new file mode 100644
index 000000000..93ea32b25
--- /dev/null
+++ b/nova/virt/hyperv/imagecache.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Image caching and management.
+"""
+import os
+
+from nova.openstack.common import lockutils
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import vhdutils
+from nova.virt.hyperv import vmutils
+from nova.virt import images
+
+LOG = logging.getLogger(__name__)
+
+
+class ImageCache(object):
+ def __init__(self):
+ self._pathutils = pathutils.PathUtils()
+ self._vhdutils = vhdutils.VHDUtils()
+
+ def _validate_vhd_image(self, vhd_path):
+ try:
+ self._vhdutils.get_vhd_info(vhd_path)
+ except Exception as ex:
+ LOG.exception(ex)
+ raise vmutils.HyperVException(_('The image is not a valid VHD: %s')
+ % vhd_path)
+
+ def get_cached_image(self, context, instance):
+ image_id = instance['image_ref']
+
+ base_vhd_dir = self._pathutils.get_base_vhd_dir()
+ vhd_path = os.path.join(base_vhd_dir, image_id + ".vhd")
+
+ @lockutils.synchronized(vhd_path, 'nova-')
+ def fetch_image_if_not_existing():
+ if not self._pathutils.exists(vhd_path):
+ images.fetch(context, image_id, vhd_path,
+ instance['user_id'],
+ instance['project_id'])
+ self._validate_vhd_image(vhd_path)
+
+ fetch_image_if_not_existing()
+ return vhd_path
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 8ee3005f1..108413b0b 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -18,15 +18,14 @@
"""
Management class for live migration VM operations.
"""
-import os
-
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
+from nova.virt.hyperv import imagecache
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vmutils
-from nova.virt import images
+from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@@ -34,12 +33,13 @@ CONF.import_opt('use_cow_images', 'nova.virt.driver')
class LiveMigrationOps(object):
- def __init__(self, volumeops):
+ def __init__(self):
self._pathutils = pathutils.PathUtils()
self._vmutils = vmutils.VMUtils()
self._livemigrutils = livemigrationutils.LiveMigrationUtils()
- self._volumeops = volumeops
+ self._volumeops = volumeops.VolumeOps()
+ self._imagecache = imagecache.ImageCache()
def live_migration(self, context, instance_ref, dest, post_method,
recover_method, block_migration=False,
@@ -65,15 +65,10 @@ class LiveMigrationOps(object):
self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
- ebs_root = self._volumeops.volume_in_mapping(
- self._volumeops.get_default_root_device(),
+ ebs_root = self._volumeops.ebs_root_in_block_devices(
block_device_info)
if not ebs_root:
- base_vhd_path = self._pathutils.get_base_vhd_path(
- instance["image_ref"])
- if not os.path.exists(base_vhd_path):
- images.fetch(context, instance["image_ref"], base_vhd_path,
- instance["user_id"], instance["project_id"])
+ self._imagecache.get_cached_image(context, instance)
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py
index 6af4f0fa5..d039a5016 100644
--- a/nova/virt/hyperv/livemigrationutils.py
+++ b/nova/virt/hyperv/livemigrationutils.py
@@ -41,7 +41,7 @@ class LiveMigrationUtils(object):
% host)
elif ex.com_error.hresult == -2147023174:
msg = (_('Target live migration host "%s" is unreachable')
- % host)
+ % host)
else:
msg = _('Live migration failed: %s') % ex.message
raise vmutils.HyperVException(msg)
diff --git a/nova/virt/hyperv/migrationops.py b/nova/virt/hyperv/migrationops.py
new file mode 100644
index 000000000..8d5b5e90c
--- /dev/null
+++ b/nova/virt/hyperv/migrationops.py
@@ -0,0 +1,233 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for migration / resize operations.
+"""
+import os
+
+from nova.openstack.common import excutils
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import imagecache
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import vhdutils
+from nova.virt.hyperv import vmops
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeops
+
+LOG = logging.getLogger(__name__)
+
+
+class MigrationOps(object):
+ def __init__(self):
+ self._hostutils = hostutils.HostUtils()
+ self._vmutils = vmutils.VMUtils()
+ self._vhdutils = vhdutils.VHDUtils()
+ self._pathutils = pathutils.PathUtils()
+ self._volumeops = volumeops.VolumeOps()
+ self._vmops = vmops.VMOps()
+ self._imagecache = imagecache.ImageCache()
+
+ def _migrate_disk_files(self, instance_name, disk_files, dest):
+ same_host = False
+ if dest in self._hostutils.get_local_ips():
+ same_host = True
+ LOG.debug(_("Migration target is the source host"))
+ else:
+ LOG.debug(_("Migration target host: %s") % dest)
+
+ instance_path = self._pathutils.get_instance_dir(instance_name)
+ revert_path = self._pathutils.get_instance_migr_revert_dir(
+ instance_name, remove_dir=True)
+ dest_path = None
+
+ try:
+ if same_host:
+ # Since source and target are the same, we copy the files to
+ # a temporary location before moving them into place
+ dest_path = '%s_tmp' % instance_path
+ if self._pathutils.exists(dest_path):
+ self._pathutils.rmtree(dest_path)
+ self._pathutils.makedirs(dest_path)
+ else:
+ dest_path = self._pathutils.get_instance_dir(
+ instance_name, dest, remove_dir=True)
+ for disk_file in disk_files:
+ # Skip the config drive as the instance is already configured
+ if os.path.basename(disk_file).lower() != 'configdrive.vhd':
+ LOG.debug(_('Copying disk "%(disk_file)s" to '
+ '"%(dest_path)s"') % locals())
+ self._pathutils.copy(disk_file, dest_path)
+
+ self._pathutils.rename(instance_path, revert_path)
+
+ if same_host:
+ self._pathutils.rename(dest_path, instance_path)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self._cleanup_failed_disk_migration(instance_path, revert_path,
+ dest_path)
+
+ def _cleanup_failed_disk_migration(self, instance_path,
+ revert_path, dest_path):
+ try:
+ if dest_path and self._pathutils.exists(dest_path):
+ self._pathutils.rmtree(dest_path)
+ if self._pathutils.exists(revert_path):
+ self._pathutils.rename(revert_path, instance_path)
+ except Exception as ex:
+ # Log and ignore this exception
+ LOG.exception(ex)
+ LOG.error(_("Cannot cleanup migration files"))
+
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type, network_info,
+ block_device_info=None):
+ LOG.debug(_("migrate_disk_and_power_off called"), instance=instance)
+
+ self._vmops.power_off(instance)
+
+ instance_name = instance["name"]
+
+ (disk_files,
+ volume_drives) = self._vmutils.get_vm_storage_paths(instance_name)
+
+ if disk_files:
+ self._migrate_disk_files(instance_name, disk_files, dest)
+
+ self._vmops.destroy(instance, destroy_disks=False)
+
+ # disk_info is not used
+ return ""
+
+ def confirm_migration(self, migration, instance, network_info):
+ LOG.debug(_("confirm_migration called"), instance=instance)
+
+ self._pathutils.get_instance_migr_revert_dir(instance['name'],
+ remove_dir=True)
+
+ def _revert_migration_files(self, instance_name):
+ instance_path = self._pathutils.get_instance_dir(
+ instance_name, create_dir=False, remove_dir=True)
+
+ revert_path = self._pathutils.get_instance_migr_revert_dir(
+ instance_name)
+ self._pathutils.rename(revert_path, instance_path)
+
+ def finish_revert_migration(self, instance, network_info,
+ block_device_info=None):
+ LOG.debug(_("finish_revert_migration called"), instance=instance)
+
+ instance_name = instance['name']
+ self._revert_migration_files(instance_name)
+
+ if self._volumeops.ebs_root_in_block_devices(block_device_info):
+ boot_vhd_path = None
+ else:
+ boot_vhd_path = self._pathutils.get_vhd_path(instance_name)
+ self._vmops.create_instance(instance, network_info, block_device_info,
+ boot_vhd_path)
+
+ self._vmops.power_on(instance)
+
+ def _merge_base_vhd(self, diff_vhd_path, base_vhd_path):
+
+ base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
+ os.path.basename(base_vhd_path))
+ try:
+ LOG.debug(_('Copying base disk %(base_vhd_path)s to '
+ '%(base_vhd_copy_path)s'), locals())
+ self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)
+
+ LOG.debug(_("Reconnecting copied base VHD "
+ "%(base_vhd_copy_path)s and diff "
+ "VHD %(diff_vhd_path)s"), locals())
+ self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
+ base_vhd_copy_path)
+
+ LOG.debug(_("Merging base disk %(base_vhd_copy_path)s and "
+ "diff disk %(diff_vhd_path)s"), locals())
+ self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path)
+
+ # Replace the differential VHD with the merged one
+ self._pathutils.rename(base_vhd_copy_path, diff_vhd_path)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ if self._pathutils.exists(base_vhd_copy_path):
+ self._pathutils.remove(base_vhd_copy_path)
+
+ def _resize_vhd(self, vhd_path, new_size):
+ LOG.debug(_("Getting info for disk: %s"), vhd_path)
+ base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path)
+ if base_disk_path:
+ # A differential VHD cannot be resized
+ self._merge_base_vhd(vhd_path, base_disk_path)
+ LOG.debug(_("Resizing disk \"%(vhd_path)s\" to new max "
+ "size %(new_size)s"), locals())
+ self._vhdutils.resize_vhd(vhd_path, new_size)
+
+ def _check_base_disk(self, context, instance, diff_vhd_path,
+ src_base_disk_path):
+ base_disk_file_name = os.path.basename(src_base_disk_path)
+ if os.path.splitext(base_disk_file_name)[0] != instance["image_ref"]:
+ raise vmutils.HyperVException(_("Unexpected base VHD path"))
+
+ base_vhd_path = self._imagecache.get_cached_image(context, instance)
+
+ # If the location of the base host differs between source
+ # and target hosts we need to reconnect the base disk
+ if src_base_disk_path.lower() != base_vhd_path.lower():
+ LOG.debug(_("Reconnecting copied base VHD "
+ "%(base_vhd_path)s and diff "
+ "VHD %(diff_vhd_path)s"), locals())
+ self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
+ base_vhd_path)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False,
+ block_device_info=None):
+ LOG.debug(_("finish_migration called"), instance=instance)
+
+ instance_name = instance['name']
+
+ if self._volumeops.ebs_root_in_block_devices(block_device_info):
+ boot_vhd_path = None
+ else:
+ boot_vhd_path = self._pathutils.get_vhd_path(instance_name)
+ if not self._pathutils.exists(boot_vhd_path):
+ raise vmutils.HyperVException(_("Cannot find boot VHD "
+ "file: %s") % boot_vhd_path)
+
+ vhd_info = self._vhdutils.get_vhd_info(boot_vhd_path)
+ src_base_disk_path = vhd_info.get("ParentPath")
+ if src_base_disk_path:
+ self._check_base_disk(context, instance, boot_vhd_path,
+ src_base_disk_path)
+
+ if resize_instance:
+ curr_size = vhd_info['MaxInternalSize']
+ new_size = instance['root_gb'] * 1024 * 1024 * 1024
+ if new_size < curr_size:
+ raise vmutils.HyperVException(_("Cannot resize a VHD to a "
+ "smaller size"))
+ elif new_size > curr_size:
+ self._resize_vhd(boot_vhd_path, new_size)
+
+ self._vmops.create_instance(instance, network_info, block_device_info,
+ boot_vhd_path)
+ self._vmops.power_on(instance)
diff --git a/nova/virt/hyperv/pathutils.py b/nova/virt/hyperv/pathutils.py
index 7bc2e7ac2..05cfffaac 100644
--- a/nova/virt/hyperv/pathutils.py
+++ b/nova/virt/hyperv/pathutils.py
@@ -23,7 +23,18 @@ from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
+hyperv_opts = [
+ cfg.StrOpt('instances_path_share',
+ default="",
+ help='The name of a Windows share name mapped to the '
+ '"instances_path" dir and used by the resize feature '
+ 'to copy files to the target host. If left blank, an '
+ 'administrative share will be used, looking for the same '
+ '"instances_path" used locally'),
+]
+
CONF = cfg.CONF
+CONF.register_opts(hyperv_opts, 'HYPERV')
CONF.import_opt('instances_path', 'nova.compute.manager')
@@ -33,35 +44,79 @@ class PathUtils(object):
import __builtin__
return __builtin__.open(path, mode)
- def get_instances_path(self):
- return os.path.normpath(CONF.instances_path)
+ def exists(self, path):
+ return os.path.exists(path)
+
+ def makedirs(self, path):
+ os.makedirs(path)
+
+ def remove(self, path):
+ os.remove(path)
+
+ def rename(self, src, dest):
+ os.rename(src, dest)
+
+ def copyfile(self, src, dest):
+ shutil.copyfile(src, dest)
+
+ def copy(self, src, dest):
+ shutil.copy(src, dest)
+
+ def rmtree(self, path):
+ shutil.rmtree(path)
+
+ def get_instances_dir(self, remote_server=None):
+ local_instance_path = os.path.normpath(CONF.instances_path)
- def get_instance_path(self, instance_name):
- instance_path = os.path.join(self.get_instances_path(), instance_name)
- if not os.path.exists(instance_path):
- LOG.debug(_('Creating folder %s '), instance_path)
- os.makedirs(instance_path)
- return instance_path
+ if remote_server:
+ if CONF.HYPERV.instances_path_share:
+ path = CONF.HYPERV.instances_path_share
+ else:
+ # Use an administrative share
+ path = local_instance_path.replace(':', '$')
+ return '\\\\%(remote_server)s\\%(path)s' % locals()
+ else:
+ return local_instance_path
+
+ def _check_create_dir(self, path):
+ if not self.exists(path):
+ LOG.debug(_('Creating directory: %s') % path)
+ self.makedirs(path)
+
+ def _check_remove_dir(self, path):
+ if self.exists(path):
+ LOG.debug(_('Removing directory: %s') % path)
+ self.rmtree(path)
+
+ def _get_instances_sub_dir(self, dir_name, remote_server=None,
+ create_dir=True, remove_dir=False):
+ instances_path = self.get_instances_dir(remote_server)
+ path = os.path.join(instances_path, dir_name)
+ if remove_dir:
+ self._check_remove_dir(path)
+ if create_dir:
+ self._check_create_dir(path)
+ return path
+
+ def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
+ remove_dir=False):
+ dir_name = '%s_revert' % instance_name
+ return self._get_instances_sub_dir(dir_name, None, create_dir,
+ remove_dir)
+
+ def get_instance_dir(self, instance_name, remote_server=None,
+ create_dir=True, remove_dir=False):
+ return self._get_instances_sub_dir(instance_name, remote_server,
+ create_dir, remove_dir)
def get_vhd_path(self, instance_name):
- instance_path = self.get_instance_path(instance_name)
- return os.path.join(instance_path, instance_name + ".vhd")
-
- def get_base_vhd_path(self, image_name):
- base_dir = os.path.join(self.get_instances_path(), '_base')
- if not os.path.exists(base_dir):
- os.makedirs(base_dir)
- return os.path.join(base_dir, image_name + ".vhd")
-
- def make_export_path(self, instance_name):
- export_folder = os.path.join(self.get_instances_path(), "export",
- instance_name)
- if os.path.isdir(export_folder):
- LOG.debug(_('Removing existing folder %s '), export_folder)
- shutil.rmtree(export_folder)
- LOG.debug(_('Creating folder %s '), export_folder)
- os.makedirs(export_folder)
- return export_folder
-
- def vhd_exists(self, path):
- return os.path.exists(path)
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'root.vhd')
+
+ def get_base_vhd_dir(self):
+ return self._get_instances_sub_dir('_base')
+
+ def get_export_dir(self, instance_name):
+ dir_name = os.path.join('export', instance_name)
+ return self._get_instances_sub_dir(dir_name, create_dir=True,
+ remove_dir=True)
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index c43f59b70..ab7c96943 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -19,7 +19,6 @@
Management class for VM snapshot operations.
"""
import os
-import shutil
from nova.compute import task_states
from nova.image import glance
@@ -57,7 +56,7 @@ class SnapshotOps(object):
snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
- export_folder = None
+ export_dir = None
try:
src_vhd_path = self._pathutils.get_vhd_path(instance_name)
@@ -66,23 +65,24 @@ class SnapshotOps(object):
src_base_disk_path = self._vhdutils.get_vhd_parent_path(
src_vhd_path)
- export_folder = self._pathutils.make_export_path(instance_name)
+ export_dir = self._pathutils.get_export_dir(instance_name)
- dest_vhd_path = os.path.join(export_folder, os.path.basename(
+ dest_vhd_path = os.path.join(export_dir, os.path.basename(
src_vhd_path))
LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'),
locals())
- shutil.copyfile(src_vhd_path, dest_vhd_path)
+ self._pathutils.copyfile(src_vhd_path, dest_vhd_path)
image_vhd_path = None
if not src_base_disk_path:
image_vhd_path = dest_vhd_path
else:
basename = os.path.basename(src_base_disk_path)
- dest_base_disk_path = os.path.join(export_folder, basename)
+ dest_base_disk_path = os.path.join(export_dir, basename)
LOG.debug(_('Copying base disk %(src_vhd_path)s to '
'%(dest_base_disk_path)s'), locals())
- shutil.copyfile(src_base_disk_path, dest_base_disk_path)
+ self._pathutils.copyfile(src_base_disk_path,
+ dest_base_disk_path)
LOG.debug(_("Reconnecting copied base VHD "
"%(dest_base_disk_path)s and diff "
@@ -111,6 +111,6 @@ class SnapshotOps(object):
LOG.exception(ex)
LOG.warning(_('Failed to remove snapshot for VM %s')
% instance_name)
- if export_folder:
- LOG.debug(_('Removing folder %s '), export_folder)
- shutil.rmtree(export_folder)
+ if export_dir:
+ LOG.debug(_('Removing directory: %s'), export_dir)
+ self._pathutils.rmtree(export_dir)
diff --git a/nova/virt/hyperv/vhdutils.py b/nova/virt/hyperv/vhdutils.py
index 21c4b4a6d..1e529807d 100644
--- a/nova/virt/hyperv/vhdutils.py
+++ b/nova/virt/hyperv/vhdutils.py
@@ -55,7 +55,17 @@ class VHDUtils(object):
DestinationPath=dest_vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
+ def resize_vhd(self, vhd_path, new_max_size):
+ image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+
+ (job_path, ret_val) = image_man_svc.ExpandVirtualHardDisk(
+ Path=vhd_path, MaxInternalSize=new_max_size)
+ self._vmutils.check_ret_val(ret_val, job_path)
+
def get_vhd_parent_path(self, vhd_path):
+ return self.get_vhd_info(vhd_path).get("ParentPath")
+
+ def get_vhd_info(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(vhd_info,
@@ -63,10 +73,19 @@ class VHDUtils(object):
ret_val) = image_man_svc.GetVirtualHardDiskInfo(vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
- base_disk_path = None
+ vhd_info_dict = {}
+
et = ElementTree.fromstring(vhd_info)
for item in et.findall("PROPERTY"):
- if item.attrib["NAME"] == "ParentPath":
- base_disk_path = item.find("VALUE").text
- break
- return base_disk_path
+ name = item.attrib["NAME"]
+ value_text = item.find("VALUE").text
+ if name == "ParentPath":
+ vhd_info_dict[name] = value_text
+ elif name in ["FileSize", "MaxInternalSize"]:
+ vhd_info_dict[name] = long(value_text)
+ elif name in ["InSavedState", "InUse"]:
+ vhd_info_dict[name] = bool(value_text)
+ elif name == "Type":
+ vhd_info_dict[name] = int(value_text)
+
+ return vhd_info_dict
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 58c1fc66a..c8acc0fa1 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -24,16 +24,18 @@ import os
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common import excutils
from nova.openstack.common import importutils
-from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import constants
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import imagecache
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
-from nova.virt import images
+from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
@@ -69,11 +71,13 @@ class VMOps(object):
'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
}
- def __init__(self, volumeops):
+ def __init__(self):
+ self._hostutils = hostutils.HostUtils()
self._vmutils = vmutils.VMUtils()
self._vhdutils = vhdutils.VHDUtils()
self._pathutils = pathutils.PathUtils()
- self._volumeops = volumeops
+ self._volumeops = volumeops.VolumeOps()
+ self._imagecache = imagecache.ImageCache()
self._vif_driver = None
self._load_vif_driver_class()
@@ -106,71 +110,77 @@ class VMOps(object):
'num_cpu': info['NumberOfProcessors'],
'cpu_time': info['UpTime']}
+ def _create_boot_vhd(self, context, instance):
+ base_vhd_path = self._imagecache.get_cached_image(context, instance)
+ boot_vhd_path = self._pathutils.get_vhd_path(instance['name'])
+
+ if CONF.use_cow_images:
+ LOG.debug(_("Creating differencing VHD. Parent: "
+ "%(base_vhd_path)s, Target: %(boot_vhd_path)s")
+ % locals())
+ self._vhdutils.create_differencing_vhd(boot_vhd_path,
+ base_vhd_path)
+ else:
+ self._pathutils.copyfile(base_vhd_path, boot_vhd_path)
+ return boot_vhd_path
+
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
+ LOG.info(_("Spawning new instance"), instance=instance)
instance_name = instance['name']
if self._vmutils.vm_exists(instance_name):
raise exception.InstanceExists(name=instance_name)
- ebs_root = self._volumeops.volume_in_mapping(
- self._volumeops.get_default_root_device(),
- block_device_info)
-
- #If is not a boot from volume spawn
- if not (ebs_root):
- #Fetch the file, assume it is a VHD file.
- vhdfile = self._pathutils.get_vhd_path(instance_name)
- try:
- self._cache_image(fn=self._fetch_image,
- context=context,
- target=vhdfile,
- fname=instance['image_ref'],
- image_id=instance['image_ref'],
- user=instance['user_id'],
- project=instance['project_id'],
- cow=CONF.use_cow_images)
- except Exception as exn:
- LOG.exception(_('cache image failed: %s'), exn)
- raise
+ if self._volumeops.ebs_root_in_block_devices(block_device_info):
+ boot_vhd_path = None
+ else:
+ boot_vhd_path = self._create_boot_vhd(context, instance)
try:
- self._vmutils.create_vm(instance_name,
- instance['memory_mb'],
- instance['vcpus'],
- CONF.limit_cpu_features)
-
- if not ebs_root:
- self._vmutils.attach_ide_drive(instance_name,
- vhdfile,
- 0,
- 0,
- constants.IDE_DISK)
- else:
- self._volumeops.attach_boot_volume(block_device_info,
- instance_name)
-
- self._vmutils.create_scsi_controller(instance_name)
-
- for vif in network_info:
- LOG.debug(_('Creating nic for instance: %s'), instance_name)
- self._vmutils.create_nic(instance_name,
- vif['id'],
- vif['address'])
- self._vif_driver.plug(instance, vif)
+ self.create_instance(instance, network_info, block_device_info,
+ boot_vhd_path)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
admin_password)
- self._set_vm_state(instance_name,
- constants.HYPERV_VM_STATE_ENABLED)
+ self.power_on(instance)
except Exception as ex:
LOG.exception(ex)
self.destroy(instance)
raise vmutils.HyperVException(_('Spawn instance failed'))
+ def create_instance(self, instance, network_info,
+ block_device_info, boot_vhd_path):
+ instance_name = instance['name']
+
+ self._vmutils.create_vm(instance_name,
+ instance['memory_mb'],
+ instance['vcpus'],
+ CONF.limit_cpu_features)
+
+ if boot_vhd_path:
+ self._vmutils.attach_ide_drive(instance_name,
+ boot_vhd_path,
+ 0,
+ 0,
+ constants.IDE_DISK)
+
+ self._vmutils.create_scsi_controller(instance_name)
+
+ self._volumeops.attach_volumes(block_device_info,
+ instance_name,
+ boot_vhd_path is None)
+
+ for vif in network_info:
+ LOG.debug(_('Creating nic for instance: %s'), instance_name)
+ self._vmutils.create_nic(instance_name,
+ vif['id'],
+ vif['address'])
+ self._vif_driver.plug(instance, vif)
+
def _create_config_drive(self, instance, injected_files, admin_password):
if CONF.config_drive_format != 'iso9660':
vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
@@ -186,7 +196,7 @@ class VMOps(object):
content=injected_files,
extra_md=extra_md)
- instance_path = self._pathutils.get_instance_path(
+ instance_path = self._pathutils.get_instance_dir(
instance['name'])
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_('Creating config drive at %(path)s'),
@@ -196,9 +206,9 @@ class VMOps(object):
try:
cdb.make_drive(configdrive_path_iso)
except exception.ProcessExecutionError, e:
- LOG.error(_('Creating config drive failed with error: %s'),
- e, instance=instance)
- raise
+ with excutils.save_and_reraise_exception():
+ LOG.error(_('Creating config drive failed with error: %s'),
+ e, instance=instance)
if not CONF.config_drive_cdrom:
drive_type = constants.IDE_DISK
@@ -213,7 +223,7 @@ class VMOps(object):
configdrive_path_iso,
configdrive_path,
attempts=1)
- os.remove(configdrive_path_iso)
+ self._pathutils.remove(configdrive_path_iso)
else:
drive_type = constants.IDE_DVD
configdrive_path = configdrive_path_iso
@@ -221,19 +231,35 @@ class VMOps(object):
self._vmutils.attach_ide_drive(instance['name'], configdrive_path,
1, 0, drive_type)
+ def _disconnect_volumes(self, volume_drives):
+ for volume_drive in volume_drives:
+ self._volumeops.disconnect_volume(volume_drive)
+
+ def _delete_disk_files(self, instance_name):
+ self._pathutils.get_instance_dir(instance_name,
+ create_dir=False,
+ remove_dir=True)
+
def destroy(self, instance, network_info=None, cleanup=True,
destroy_disks=True):
instance_name = instance['name']
- LOG.debug(_("Got request to destroy instance: %s"), instance_name)
+ LOG.info(_("Got request to destroy instance: %s"), instance_name)
try:
if self._vmutils.vm_exists(instance_name):
- volumes_drives_list = self._vmutils.destroy_vm(instance_name,
- destroy_disks)
- #Disconnect volumes
- for volume_drive in volumes_drives_list:
- self._volumeops.disconnect_volume(volume_drive)
+
+ #Stop the VM first.
+ self.power_off(instance)
+
+ storage = self._vmutils.get_vm_storage_paths(instance_name)
+ (disk_files, volume_drives) = storage
+
+ self._vmutils.destroy_vm(instance_name)
+ self._disconnect_volumes(volume_drives)
else:
LOG.debug(_("Instance not found: %s"), instance_name)
+
+ if destroy_disks:
+ self._delete_disk_files(instance_name)
except Exception as ex:
LOG.exception(ex)
raise vmutils.HyperVException(_('Failed to destroy instance: %s') %
@@ -292,45 +318,3 @@ class VMOps(object):
msg = _("Failed to change vm state of %(vm_name)s"
" to %(req_state)s") % locals()
raise vmutils.HyperVException(msg)
-
- def _fetch_image(self, target, context, image_id, user, project,
- *args, **kwargs):
- images.fetch(context, image_id, target, user, project)
-
- def _cache_image(self, fn, target, fname, cow=False, size=None,
- *args, **kwargs):
- """Wrapper for a method that creates and caches an image.
-
- This wrapper will save the image into a common store and create a
- copy for use by the hypervisor.
-
- The underlying method should specify a kwarg of target representing
- where the image will be saved.
-
- fname is used as the filename of the base image. The filename needs
- to be unique to a given image.
-
- If cow is True, it will make a CoW image instead of a copy.
- """
- @lockutils.synchronized(fname, 'nova-')
- def call_if_not_exists(path, fn, *args, **kwargs):
- if not os.path.exists(path):
- fn(target=path, *args, **kwargs)
-
- if not self._pathutils.vhd_exists(target):
- LOG.debug(_("Use CoW image: %s"), cow)
- if cow:
- parent_path = self._pathutils.get_base_vhd_path(fname)
- call_if_not_exists(parent_path, fn, *args, **kwargs)
-
- LOG.debug(_("Creating differencing VHD. Parent: "
- "%(parent_path)s, Target: %(target)s") % locals())
- try:
- self._vhdutils.create_differencing_vhd(target, parent_path)
- except Exception as ex:
- LOG.exception(ex)
- raise vmutils.HyperVException(
- _('Failed to create a differencing disk from '
- '%(parent_path)s to %(target)s') % locals())
- else:
- call_if_not_exists(target, fn, *args, **kwargs)
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 876153902..d80144b65 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -135,6 +135,12 @@ class VMUtils(object):
self._modify_virt_resource(procsetting, vm.path_())
+ def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features):
+ vm = self._lookup_vm_check(vm_name)
+ vmsetting = self._get_vm_setting_data(vm)
+ self._set_vm_memory(vm, vmsetting, memory_mb)
+ self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
+
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
@@ -318,14 +324,9 @@ class VMUtils(object):
LOG.debug(_("Successfully changed vm state of %(vm_name)s"
" to %(req_state)s") % locals())
- def destroy_vm(self, vm_name, destroy_disks=True):
- """Destroy the VM. Also destroy the associated VHD disk files."""
-
+ def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
- #Stop the VM first.
- self.set_vm_state(vm_name, constants.HYPERV_VM_STATE_DISABLED)
-
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
@@ -338,35 +339,25 @@ class VMUtils(object):
if r.ResourceSubType ==
'Microsoft Physical Disk Drive']
- #Collect volumes information before destroying the VM.
- volumes_drives_list = []
+ volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
- #Appending the Msvm_Disk path
- volumes_drives_list.append(drive_path)
+ volume_drives.append(drive_path)
- #Collect disk file information before destroying the VM.
disk_files = []
for disk_resource in disk_resources:
disk_files.extend([c for c in disk_resource.Connection])
+ return (disk_files, volume_drives)
+
+ def destroy_vm(self, vm_name):
+ vm = self._lookup_vm_check(vm_name)
+
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
#Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
- if destroy_disks:
- #Delete associated vhd disk files.
- for disk in disk_files:
- LOG.debug(_("Deleting disk file: %(disk)s") % locals())
- self._delete_file(disk)
-
- return volumes_drives_list
-
- def _delete_file(self, path):
- f = self._conn_cimv2.query("Select * from CIM_DataFile where "
- "Name = '%s'" % path.replace("'", "''"))[0]
- f.Delete()
-
def check_ret_val(self, ret_val, job_path, success_values=[0]):
if ret_val == constants.WMI_JOB_STATUS_STARTED:
self._wait_for_job(job_path)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index a7e56b739..c665cbeb7 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -68,42 +68,25 @@ class VolumeOps(object):
else:
return volumeutils.VolumeUtils()
- def attach_boot_volume(self, block_device_info, vm_name):
- """Attach the boot volume to the IDE controller."""
-
- LOG.debug(_("block device info: %s"), block_device_info)
- ebs_root = driver.block_device_info_get_mapping(
- block_device_info)[0]
-
- connection_info = ebs_root['connection_info']
- data = connection_info['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
- self._volutils.login_storage_target(target_lun, target_iqn,
- target_portal)
- try:
- #Getting the mounted disk
- mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
- target_lun)
- #Find the IDE controller for the vm.
- ctrller_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
- #Attaching to the same slot as the VHD disk file
- self._vmutils.attach_volume_to_controller(vm_name,
- ctrller_path, 0,
- mounted_disk_path)
- except Exception as exn:
- LOG.exception(_('Attach boot from volume failed: %s'), exn)
- self._volutils.logout_storage_target(target_iqn)
- raise vmutils.HyperVException(
- _('Unable to attach boot volume to instance %s') % vm_name)
-
- def volume_in_mapping(self, mount_device, block_device_info):
- return self._volutils.volume_in_mapping(mount_device,
+ def ebs_root_in_block_devices(self, block_device_info):
+ return self._volutils.volume_in_mapping(self._default_root_device,
block_device_info)
- def attach_volume(self, connection_info, instance_name):
- """Attach a volume to the SCSI controller."""
+ def attach_volumes(self, block_device_info, instance_name, ebs_root):
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+
+ if ebs_root:
+ self.attach_volume(mapping[0]['connection_info'],
+ instance_name, True)
+ mapping = mapping[1:]
+ for vol in mapping:
+ self.attach_volume(vol['connection_info'], instance_name)
+
+ def attach_volume(self, connection_info, instance_name, ebs_root=False):
+ """
+ Attach a volume to the SCSI controller or to the IDE controller if
+ ebs_root is True
+ """
LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s")
% locals())
data = connection_info['data']
@@ -116,10 +99,19 @@ class VolumeOps(object):
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
- #Find the SCSI controller for the vm
- ctrller_path = self._vmutils.get_vm_iscsi_controller(instance_name)
- slot = self._get_free_controller_slot(ctrller_path)
+ if ebs_root:
+ #Find the IDE controller for the vm.
+ ctrller_path = self._vmutils.get_vm_ide_controller(
+ instance_name, 0)
+ #Attaching to the first slot
+ slot = 0
+ else:
+ #Find the SCSI controller for the vm
+ ctrller_path = self._vmutils.get_vm_iscsi_controller(
+ instance_name)
+ slot = self._get_free_controller_slot(ctrller_path)
+
self._vmutils.attach_volume_to_controller(instance_name,
ctrller_path,
slot,
@@ -134,6 +126,11 @@ class VolumeOps(object):
#Slots starts from 0, so the lenght of the disks gives us the free slot
return self._vmutils.get_attached_disks_count(scsi_controller_path)
+ def detach_volumes(self, block_device_info, instance_name):
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ for vol in mapping:
+ self.detach_volume(vol['connection_info'], instance_name)
+
def detach_volume(self, connection_info, instance_name):
"""Dettach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s "
@@ -192,6 +189,3 @@ class VolumeOps(object):
physical_drive_path)
#Logging out the target
self._volutils.execute_log_out(session_id)
-
- def get_default_root_device(self):
- return self._default_root_device
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 018badecf..a5c960486 100644..100755
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -184,10 +184,10 @@ def qemu_img_info(path):
return QemuImgInfo(out)
-def convert_image(source, dest, out_format):
+def convert_image(source, dest, out_format, run_as_root=False):
"""Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
- utils.execute(*cmd)
+ utils.execute(*cmd, run_as_root=run_as_root)
def fetch(context, image_href, path, _user_id, _project_id):
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index fc4a7dbfb..0098410cd 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -175,8 +175,7 @@ def is_disk_bus_valid_for_virt(virt_type, disk_bus):
}
if virt_type not in valid_bus:
- raise exception.NovaException(
- _("Unsupported virt type %s") % virt_type)
+ raise exception.UnsupportedVirtType(virt=virt_type)
return disk_bus in valid_bus[virt_type]
@@ -200,9 +199,8 @@ def get_disk_bus_for_device_type(virt_type,
disk_bus = image_meta.get('properties', {}).get(key)
if disk_bus is not None:
if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
- raise exception.NovaException(
- _("Disk bus %(disk_bus)s is not valid for %(virt)s") %
- {'disk_bus': disk_bus, 'virt': virt_type})
+ raise exception.UnsupportedHardware(model=disk_bus,
+ virt=virt_type)
return disk_bus
# Otherwise pick a hypervisor default disk bus
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 485f661e5..4e2fb9d39 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -151,7 +151,10 @@ libvirt_opts = [
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
- 'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver'
+ 'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
+ 'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
+ 'glusterfs='
+ 'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver'
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
@@ -1892,9 +1895,9 @@ class LibvirtDriver(driver.ComputeDriver):
guest.add_device(cfg)
for (network, mapping) in network_info:
- self.vif_driver.plug(instance, (network, mapping))
cfg = self.vif_driver.get_config(instance,
- network, mapping)
+ network, mapping,
+ image_meta)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
@@ -2203,18 +2206,15 @@ class LibvirtDriver(driver.ComputeDriver):
"""
total = 0
+ if CONF.libvirt_type == 'lxc':
+ return total + 1
+
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._conn.lookupByID(dom_id)
vcpus = dom.vcpus()
- if vcpus is None:
- # dom.vcpus is not implemented for lxc, but returning 0 for
- # a used count is hardly useful for something measuring
- # usage
- total += 1
- else:
- total += len(vcpus[1])
+ total += len(vcpus[1])
except libvirt.libvirtError as err:
if err.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug(_("List of domains returned by libVirt: %s")
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 74148a866..0a84b22e5 100755
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -43,6 +43,10 @@ __imagebackend_opts = [
default=False,
help='Create sparse logical volumes (with virtualsize)'
' if this flag is set to True.'),
+ cfg.IntOpt('libvirt_lvm_snapshot_size',
+ default='1000',
+ help='The amount of storage (in megabytes) to allocate for LVM'
+ ' snapshot copy-on-write blocks.'),
]
CONF = cfg.CONF
@@ -239,6 +243,11 @@ class Lvm(Image):
self.sparse = CONF.libvirt_sparse_logical_volumes
+ if snapshot_name:
+ self.snapshot_name = snapshot_name
+ self.snapshot_path = os.path.join('/dev', self.vg,
+ self.snapshot_name)
+
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
lock_path=self.lock_path)
@@ -248,8 +257,7 @@ class Lvm(Image):
size = size if resize else base_size
libvirt_utils.create_lvm_image(self.vg, self.lv,
size, sparse=self.sparse)
- cmd = ('dd', 'if=%s' % base, 'of=%s' % self.path, 'bs=4M')
- utils.execute(*cmd, run_as_root=True)
+ images.convert_image(base, self.path, 'raw', run_as_root=True)
if resize:
disk.resize2fs(self.path, run_as_root=True)
@@ -274,6 +282,21 @@ class Lvm(Image):
with excutils.save_and_reraise_exception():
libvirt_utils.remove_logical_volumes(path)
+ def snapshot_create(self):
+ size = CONF.libvirt_lvm_snapshot_size
+ cmd = ('lvcreate', '-L', size, '-s', '--name', self.snapshot_name,
+ self.path)
+ libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
+
+ def snapshot_extract(self, target, out_format):
+ images.convert_image(self.snapshot_path, target, out_format,
+ run_as_root=True)
+
+ def snapshot_delete(self):
+ # NOTE (rmk): Snapshot volumes are automatically zeroed by LVM
+ cmd = ('lvremove', '-f', self.snapshot_path)
+ libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
+
class Backend(object):
def __init__(self, use_cow):
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 0990f29b1..ee4f7e194 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -54,6 +54,24 @@ CONF.import_opt('use_ipv6', 'nova.netconf')
LIBVIRT_OVS_VPORT_VERSION = 9011
+def is_vif_model_valid_for_virt(virt_type, vif_model):
+ valid_models = {
+ 'qemu': ['virtio', 'ne2k_pci', 'pcnet', 'rtl8139', 'e1000'],
+ 'kvm': ['virtio', 'ne2k_pci', 'pcnet', 'rtl8139', 'e1000'],
+ 'xen': ['netfront', 'ne2k_pci', 'pcnet', 'rtl8139', 'e1000'],
+ 'lxc': [],
+ 'uml': [],
+ }
+
+ if vif_model is None:
+ return True
+
+ if virt_type not in valid_models:
+ raise exception.UnsupportedVirtType(virt=virt_type)
+
+ return vif_model in valid_models[virt_type]
+
+
class LibvirtBaseVIFDriver(object):
def __init__(self, get_connection):
@@ -74,17 +92,35 @@ class LibvirtBaseVIFDriver(object):
return mapping['vif_devname']
return ("nic" + mapping['vif_uuid'])[:network_model.NIC_NAME_LEN]
- def get_config(self, instance, network, mapping):
+ def get_config(self, instance, network, mapping, image_meta):
conf = vconfig.LibvirtConfigGuestInterface()
+ # Default to letting libvirt / the hypervisor choose the model
model = None
driver = None
- if (CONF.libvirt_type in ('kvm', 'qemu') and
+
+ # If the user has specified a 'vif_model' against the
+ # image then honour that model
+ if image_meta:
+ vif_model = image_meta.get('properties', {}).get('vif_model')
+ if vif_model is not None:
+ model = vif_model
+
+ # Else if the virt type is KVM/QEMU, use virtio according
+ # to the global config parameter
+ if (model is None and
+ CONF.libvirt_type in ('kvm', 'qemu') and
CONF.libvirt_use_virtio_for_bridges):
model = "virtio"
- # Workaround libvirt bug, where it mistakenly
- # enables vhost mode, even for non-KVM guests
- if CONF.libvirt_type == "qemu":
- driver = "qemu"
+
+ # Workaround libvirt bug, where it mistakenly
+ # enables vhost mode, even for non-KVM guests
+ if model == "virtio" and CONF.libvirt_type == "qemu":
+ driver = "qemu"
+
+ if not is_vif_model_valid_for_virt(CONF.libvirt_type,
+ model):
+ raise exception.UnsupportedHardware(model=model,
+ virt=CONF.libvirt_type)
designer.set_vif_guest_frontend_config(
conf, mapping['mac'], model, driver)
@@ -122,12 +158,13 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
return True
return False
- def get_config_bridge(self, instance, network, mapping):
+ def get_config_bridge(self, instance, network, mapping, image_meta):
"""Get VIF configurations for bridge type."""
conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
network,
- mapping)
+ mapping,
+ image_meta)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(network),
@@ -154,22 +191,24 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
return conf
- def get_config_ovs_ethernet(self, instance, network, mapping):
+ def get_config_ovs_ethernet(self, instance, network, mapping, image_meta):
conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
network,
- mapping)
+ mapping,
+ image_meta)
dev = self.get_vif_devname(mapping)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
- def get_config_ovs_bridge(self, instance, network, mapping):
+ def get_config_ovs_bridge(self, instance, network, mapping, image_meta):
conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
network,
- mapping)
+ mapping,
+ image_meta)
designer.set_vif_host_backend_ovs_config(
conf, self.get_bridge_name(network),
@@ -178,29 +217,34 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
return conf
- def get_config_ovs_hybrid(self, instance, network, mapping):
+ def get_config_ovs_hybrid(self, instance, network, mapping, image_meta):
newnet = copy.deepcopy(network)
newnet['bridge'] = self.get_br_name(mapping['vif_uuid'])
return self.get_config_bridge(instance,
newnet,
- mapping)
+ mapping,
+ image_meta)
- def get_config_ovs(self, instance, network, mapping):
+ def get_config_ovs(self, instance, network, mapping, image_meta):
if self.get_firewall_required():
return self.get_config_ovs_hybrid(instance, network,
- mapping)
+ mapping,
+ image_meta)
elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
return self.get_config_ovs_bridge(instance, network,
- mapping)
+ mapping,
+ image_meta)
else:
return self.get_config_ovs_ethernet(instance, network,
- mapping)
+ mapping,
+ image_meta)
- def get_config_802qbg(self, instance, network, mapping):
+ def get_config_802qbg(self, instance, network, mapping, image_meta):
conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
network,
- mapping)
+ mapping,
+ image_meta)
params = mapping["qbg_params"]
designer.set_vif_host_backend_802qbg_config(
@@ -212,11 +256,12 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
return conf
- def get_config_802qbh(self, instance, network, mapping):
+ def get_config_802qbh(self, instance, network, mapping, image_meta):
conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
network,
- mapping)
+ mapping,
+ image_meta)
params = mapping["qbh_params"]
designer.set_vif_host_backend_802qbh_config(
@@ -225,7 +270,7 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
return conf
- def get_config(self, instance, network, mapping):
+ def get_config(self, instance, network, mapping, image_meta):
vif_type = mapping.get('vif_type')
LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
@@ -238,13 +283,21 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
"for this vif_driver implementation"))
if vif_type == network_model.VIF_TYPE_BRIDGE:
- return self.get_config_bridge(instance, network, mapping)
+ return self.get_config_bridge(instance,
+ network, mapping,
+ image_meta)
elif vif_type == network_model.VIF_TYPE_OVS:
- return self.get_config_ovs(instance, network, mapping)
+ return self.get_config_ovs(instance,
+ network, mapping,
+ image_meta)
elif vif_type == network_model.VIF_TYPE_802_QBG:
- return self.get_config_802qbg(instance, network, mapping)
+ return self.get_config_802qbg(instance,
+ network, mapping,
+ image_meta)
elif vif_type == network_model.VIF_TYPE_802_QBH:
- return self.get_config_802qbh(instance, network, mapping)
+ return self.get_config_802qbh(instance,
+ network, mapping,
+ image_meta)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
@@ -453,8 +506,8 @@ class LibvirtBridgeDriver(LibvirtGenericVIFDriver):
drivers which do not yet report 'vif_type' port binding.
Will be deprecated in Havana, and removed in Ixxxx."""
- def get_config(self, instance, network, mapping):
- return self.get_config_bridge(instance, network, mapping)
+ def get_config(self, instance, network, mapping, image_meta):
+ return self.get_config_bridge(instance, network, mapping, image_meta)
def plug(self, instance, vif):
self.plug_bridge(instance, vif)
@@ -474,8 +527,10 @@ class LibvirtOpenVswitchDriver(LibvirtGenericVIFDriver):
def get_ovs_interfaceid(self, mapping):
return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
- def get_config(self, instance, network, mapping):
- return self.get_config_ovs_ethernet(instance, network, mapping)
+ def get_config(self, instance, network, mapping, image_meta):
+ return self.get_config_ovs_ethernet(instance,
+ network, mapping,
+ image_meta)
def plug(self, instance, vif):
self.plug_ovs_ethernet(instance, vif)
@@ -495,8 +550,10 @@ class LibvirtHybridOVSBridgeDriver(LibvirtGenericVIFDriver):
def get_ovs_interfaceid(self, mapping):
return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
- def get_config(self, instance, network, mapping):
- return self.get_config_ovs_hybrid(instance, network, mapping)
+ def get_config(self, instance, network, mapping, image_meta):
+ return self.get_config_ovs_hybrid(instance,
+ network, mapping,
+ image_meta)
def plug(self, instance, vif):
return self.plug_ovs_hybrid(instance, vif)
@@ -516,8 +573,10 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtGenericVIFDriver):
def get_ovs_interfaceid(self, mapping):
return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
- def get_config(self, instance, network, mapping):
- return self.get_config_ovs_bridge(instance, network, mapping)
+ def get_config(self, instance, network, mapping, image_meta):
+ return self.get_config_ovs_bridge(instance,
+ network, mapping,
+ image_meta)
def plug(self, instance, vif):
return self.plug_ovs_bridge(instance, vif)
@@ -535,8 +594,12 @@ class QuantumLinuxBridgeVIFDriver(LibvirtGenericVIFDriver):
def_bridge = ("brq" + network['id'])[:network_model.NIC_NAME_LEN]
return network.get('bridge') or def_bridge
- def get_config(self, instance, network, mapping):
- return self.get_config_bridge(instance, network, mapping)
+ def get_config(self, instance, network, mapping, image_meta):
+ # In order for libvirt to make use of the bridge name then it has
+ # to ensure that the bridge exists
+ if 'should_create_bridge' not in mapping:
+ mapping['should_create_bridge'] = True
+ return self.get_config_bridge(instance, network, mapping, image_meta)
def plug(self, instance, vif):
self.plug_bridge(instance, vif)
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 724a859b8..c368f66f6 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -50,6 +50,13 @@ volume_opts = [
default=None,
help='Mount options passed to the nfs client. See section '
'of the nfs man page for details'),
+ cfg.StrOpt('num_aoe_discover_tries',
+ default=3,
+ help='number of times to rediscover AoE target to find volume'),
+ cfg.StrOpt('glusterfs_mount_point_base',
+ default=paths.state_path_def('mnt'),
+ help='Dir where the glusterfs volume is mounted on the '
+ 'compute node'),
]
CONF = cfg.CONF
@@ -322,3 +329,124 @@ class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
return utils.execute('stat', path, run_as_root=True)
except exception.ProcessExecutionError:
return False
+
+
+class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
+ """Driver to attach AoE volumes to libvirt."""
+ def __init__(self, connection):
+ super(LibvirtAOEVolumeDriver,
+ self).__init__(connection, is_block_dev=True)
+
+ def _aoe_discover(self):
+ """Call aoe-discover (aoe-tools) AoE Discover."""
+ (out, err) = utils.execute('aoe-discover',
+ run_as_root=True, check_exit_code=0)
+ return (out, err)
+
+ def _aoe_revalidate(self, aoedev):
+ """Revalidate the LUN Geometry (When an AoE ID is reused)."""
+ (out, err) = utils.execute('aoe-revalidate', aoedev,
+ run_as_root=True, check_exit_code=0)
+ return (out, err)
+
+ def connect_volume(self, connection_info, mount_device):
+ shelf = connection_info['data']['target_shelf']
+ lun = connection_info['data']['target_lun']
+ aoedev = 'e%s.%s' % (shelf, lun)
+ aoedevpath = '/dev/etherd/%s' % (aoedev)
+
+ if os.path.exists(aoedevpath):
+ # NOTE(jbr_): If aoedevpath already exists, revalidate the LUN.
+ self._aoe_revalidate(aoedev)
+ else:
+ # NOTE(jbr_): If aoedevpath does not exist, do a discover.
+ self._aoe_discover()
+
+ #NOTE(jbr_): Device path is not always present immediately
+ def _wait_for_device_discovery(aoedevpath, mount_device):
+ tries = self.tries
+ if os.path.exists(aoedevpath):
+ raise utils.LoopingCallDone()
+
+ if self.tries >= CONF.num_aoe_discover_tries:
+ raise exception.NovaException(_("AoE device not found at %s") %
+ (aoedevpath))
+ LOG.warn(_("AoE volume not yet found at: %(aoedevpath)s. "
+ "Try number: %(tries)s") %
+ locals())
+
+ self._aoe_discover()
+ self.tries = self.tries + 1
+
+ self.tries = 0
+ timer = utils.FixedIntervalLoopingCall(_wait_for_device_discovery,
+ aoedevpath, mount_device)
+ timer.start(interval=2).wait()
+
+ tries = self.tries
+ if tries != 0:
+ LOG.debug(_("Found AoE device %(aoedevpath)s "
+ "(after %(tries)s rediscover)") %
+ locals())
+
+ conf = super(LibvirtAOEVolumeDriver,
+ self).connect_volume(connection_info, mount_device)
+ conf.source_type = "block"
+ conf.source_path = aoedevpath
+ return conf
+
+
+class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
+ """Class implements libvirt part of volume driver for GlusterFS."""
+
+ def __init__(self, connection):
+ """Create back-end to glusterfs."""
+ super(LibvirtGlusterfsVolumeDriver,
+ self).__init__(connection, is_block_dev=False)
+
+ def connect_volume(self, connection_info, mount_device):
+ """Connect the volume. Returns xml for libvirt."""
+ conf = super(LibvirtGlusterfsVolumeDriver,
+ self).connect_volume(connection_info, mount_device)
+ path = self._ensure_mounted(connection_info['data']['export'])
+ path = os.path.join(path, connection_info['data']['name'])
+ conf.source_type = 'file'
+ conf.source_path = path
+ return conf
+
+ def _ensure_mounted(self, glusterfs_export):
+ """
+ @type glusterfs_export: string
+ """
+ mount_path = os.path.join(CONF.glusterfs_mount_point_base,
+ self.get_hash_str(glusterfs_export))
+ self._mount_glusterfs(mount_path, glusterfs_export, ensure=True)
+ return mount_path
+
+ def _mount_glusterfs(self, mount_path, glusterfs_share, ensure=False):
+ """Mount glusterfs export to mount path."""
+ if not self._path_exists(mount_path):
+ utils.execute('mkdir', '-p', mount_path)
+
+ try:
+ utils.execute('mount', '-t', 'glusterfs', glusterfs_share,
+ mount_path,
+ run_as_root=True)
+ except exception.ProcessExecutionError as exc:
+ if ensure and 'already mounted' in exc.message:
+ LOG.warn(_("%s is already mounted"), glusterfs_share)
+ else:
+ raise
+
+ @staticmethod
+ def get_hash_str(base_str):
+ """returns string that represents hash of base_str (in hex format)."""
+ return hashlib.md5(base_str).hexdigest()
+
+ @staticmethod
+ def _path_exists(path):
+ """Check path."""
+ try:
+ return utils.execute('stat', path, run_as_root=True)
+ except exception.ProcessExecutionError:
+ return False
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
index 76caca1b9..dc539814e 100644
--- a/nova/virt/powervm/blockdev.py
+++ b/nova/virt/powervm/blockdev.py
@@ -164,8 +164,47 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter):
LOG.warn(_("Failed to clean up snapshot file "
"%(snapshot_file_path)s") % locals())
- def migrate_volume(self):
- raise NotImplementedError()
+ def migrate_volume(self, lv_name, src_host, dest, image_path,
+ instance_name=None):
+ """Copy a logical volume to file, compress, and transfer
+
+ :param lv_name: logical volume device name
+ :param dest: destination IP or DNS name
+ :param image_path: path to remote image storage directory
+ :param instance_name: name of instance that is being migrated
+ :returns: file path on destination of image file that was moved
+ """
+ if instance_name:
+ file_name = ''.join([instance_name, '_rsz'])
+ else:
+ file_name = ''.join([lv_name, '_rsz'])
+ file_path = os.path.join(image_path, file_name)
+ self._copy_device_to_file(lv_name, file_path)
+ cmds = 'gzip %s' % file_path
+ self.run_vios_command_as_root(cmds)
+ file_path = file_path + '.gz'
+ # If destination is not same host
+ # transfer file to destination VIOS system
+ if (src_host != dest):
+ with common.vios_to_vios_auth(self.connection_data.host,
+ dest,
+ self.connection_data) as key_name:
+ cmd = ''.join(['scp -o "StrictHostKeyChecking no"',
+ ('-i %s' % key_name),
+ file_path,
+ '%s@%s:%s' % (self.connection_data.username,
+ dest,
+ image_path)
+ ])
+ # do the remote copy
+ self.run_vios_command(cmd)
+
+ # cleanup local file only if transferring to remote system
+ # otherwise keep the file to boot from locally and clean up later
+ cleanup_cmd = 'rm %s' % file_path
+ self.run_vios_command_as_root(cleanup_cmd)
+
+ return file_path
def attach_volume_to_host(self, *args, **kargs):
pass
diff --git a/nova/virt/powervm/command.py b/nova/virt/powervm/command.py
index 3e51c933c..25cc2c6cd 100644
--- a/nova/virt/powervm/command.py
+++ b/nova/virt/powervm/command.py
@@ -65,6 +65,9 @@ class BaseCommand(object):
def vhost_by_instance_id(self, instance_id_hex):
pass
+ def chsyscfg(self, args=''):
+ return 'chsyscfg %s' % args
+
class IVMCommand(BaseCommand):
diff --git a/nova/virt/powervm/common.py b/nova/virt/powervm/common.py
index bf69be84e..d98d4ae89 100644
--- a/nova/virt/powervm/common.py
+++ b/nova/virt/powervm/common.py
@@ -14,13 +14,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import contextlib
import ftplib
import os
+import uuid
import paramiko
from nova import exception as nova_exception
from nova.openstack.common import log as logging
+from nova import utils
from nova.virt.powervm import exception
LOG = logging.getLogger(__name__)
@@ -85,7 +88,7 @@ def ssh_command_as_root(ssh_connection, cmd, check_exit_code=True):
raise nova_exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
- cmd=' '.join(cmd))
+ cmd=''.join(cmd))
return (stdout, stderr)
@@ -154,3 +157,66 @@ def aix_path_join(path_one, path_two):
final_path = path_one + '/' + path_two
return final_path
+
+
+@contextlib.contextmanager
+def vios_to_vios_auth(source, dest, conn_info):
+ """Context allowing for SSH between VIOS partitions
+
+ This context will build an SSH key on the source host, put the key
+ into the authorized_keys on the destination host, and make the
+ private key file name available within the context.
+ The key files and key inserted into authorized_keys will be
+ removed when the context exits.
+
+ :param source: source IP or DNS name
+ :param dest: destination IP or DNS name
+ :param conn_info: dictionary object with SSH connection
+ information for both hosts
+ """
+ KEY_BASE_NAME = "os-%s" % uuid.uuid4().hex
+ keypair_uuid = uuid.uuid4()
+ src_conn_obj = ssh_connect(conn_info)
+
+ dest_conn_info = Connection(dest, conn_info.username,
+ conn_info.password)
+ dest_conn_obj = ssh_connect(dest_conn_info)
+
+ def run_command(conn_obj, cmd):
+ stdout, stderr = utils.ssh_execute(conn_obj, cmd)
+ return stdout.strip().splitlines()
+
+ def build_keypair_on_source():
+ mkkey = ('ssh-keygen -f %s -N "" -C %s' %
+ (KEY_BASE_NAME, keypair_uuid.hex))
+ ssh_command_as_root(src_conn_obj, mkkey)
+
+ chown_key = ('chown %s %s*' % (conn_info.username, KEY_BASE_NAME))
+ ssh_command_as_root(src_conn_obj, chown_key)
+
+ cat_key = ('cat %s.pub' % KEY_BASE_NAME)
+ pubkey = run_command(src_conn_obj, cat_key)
+
+ return pubkey[0]
+
+ def cleanup_key_on_source():
+ rmkey = 'rm %s*' % KEY_BASE_NAME
+ run_command(src_conn_obj, rmkey)
+
+ def insert_into_authorized_keys(public_key):
+ echo_key = 'echo "%s" >> .ssh/authorized_keys' % public_key
+ ssh_command_as_root(dest_conn_obj, echo_key)
+
+ def remove_from_authorized_keys():
+ rmkey = ('sed /%s/d .ssh/authorized_keys > .ssh/authorized_keys' %
+ keypair_uuid.hex)
+ ssh_command_as_root(dest_conn_obj, rmkey)
+
+ public_key = build_keypair_on_source()
+ insert_into_authorized_keys(public_key)
+
+ try:
+ yield KEY_BASE_NAME
+ finally:
+ remove_from_authorized_keys()
+ cleanup_key_on_source()
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index dd0f473a6..9cd6453ab 100755
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import socket
import time
from nova.image import glance
@@ -44,7 +45,7 @@ powervm_opts = [
help='PowerVM image remote path'),
cfg.StrOpt('powervm_img_local_path',
default=None,
- help='Local directory to download glance images to'),
+ help='Local directory to download glance images to')
]
CONF = cfg.CONF
@@ -113,10 +114,15 @@ class PowerVMDriver(driver.ComputeDriver):
pass
def get_host_ip_addr(self):
- """
- Retrieves the IP address of the dom0
- """
- pass
+ """Retrieves the IP address of the hypervisor host."""
+ LOG.debug(_("In get_host_ip_addr"))
+ # TODO(mrodden): use operator get_hostname instead
+ hostname = CONF.powervm_mgr
+ LOG.debug(_("Attempting to resolve %s") % hostname)
+ ip_addr = socket.gethostbyname(hostname)
+ LOG.debug(_("%(hostname)s was successfully resolved to %(ip_addr)s") %
+ {'hostname': hostname, 'ip_addr': ip_addr})
+ return ip_addr
def snapshot(self, context, instance, image_id):
"""Snapshots the specified instance.
@@ -208,3 +214,89 @@ class PowerVMDriver(driver.ComputeDriver):
the cache and remove images which are no longer of interest.
"""
pass
+
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type, network_info,
+ block_device_info=None):
+ """Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+
+ :returns: disk_info dictionary that is passed as the
+ disk_info parameter to finish_migration
+ on the destination nova-compute host
+ """
+ src_host = self.get_host_ip_addr()
+ pvm_op = self._powervm._operator
+ lpar_obj = pvm_op.get_lpar(instance['name'])
+ vhost = pvm_op.get_vhost_by_instance_id(lpar_obj['lpar_id'])
+ diskname = pvm_op.get_disk_name_by_vhost(vhost)
+
+ self._powervm.power_off(instance['name'], timeout=120)
+
+ disk_info = self._powervm.migrate_disk(
+ diskname, src_host, dest, CONF.powervm_img_remote_path,
+ instance['name'])
+ disk_info['old_lv_size'] = pvm_op.get_logical_vol_size(diskname)
+ new_name = self._get_resize_name(instance['name'])
+ pvm_op.rename_lpar(instance['name'], new_name)
+ return disk_info
+
+ def _get_resize_name(self, instance_name):
+ """Rename the instance to be migrated to avoid naming conflicts
+
+ :param instance_name: name of instance to be migrated
+ :returns: the new instance name
+ """
+ name_tag = 'rsz_'
+
+ # if the current name would overflow with new tag
+ if ((len(instance_name) + len(name_tag)) > 31):
+ # remove enough chars for the tag to fit
+ num_chars = len(name_tag)
+ old_name = instance_name[num_chars:]
+ else:
+ old_name = instance_name
+
+ return ''.join([name_tag, old_name])
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance,
+ block_device_info=None):
+ """Completes a resize, turning on the migrated instance
+
+ :param network_info:
+ :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+ :param image_meta: image object returned by nova.image.glance that
+ defines the image from which this instance
+ was created
+ """
+ lpar_obj = self._powervm._create_lpar_instance(instance)
+
+ new_lv_size = instance['instance_type']['root_gb']
+ old_lv_size = disk_info['old_lv_size']
+ if 'root_disk_file' in disk_info:
+ disk_size = max(int(new_lv_size), int(old_lv_size))
+ disk_size_bytes = disk_size * 1024 * 1024 * 1024
+ self._powervm.deploy_from_migrated_file(
+ lpar_obj, disk_info['root_disk_file'], disk_size_bytes)
+ else:
+ # this shouldn't get hit unless someone forgot to handle
+ # a certain migration type
+ raise Exception(
+ _('Unrecognized root disk information: %s') %
+ disk_info)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM."""
+
+ new_name = self._get_resize_name(instance['name'])
+ self._powervm.destroy(new_name)
+
+ def finish_revert_migration(self, instance, network_info,
+ block_device_info=None):
+ """Finish reverting a resize, powering back on the instance."""
+
+ # undo instance rename and start
+ new_name = self._get_resize_name(instance['name'])
+ self._powervm._operator.rename_lpar(new_name, instance['name'])
+ self._powervm.power_on(instance['name'])
diff --git a/nova/virt/powervm/lpar.py b/nova/virt/powervm/lpar.py
index 10e8c8e37..907818ca8 100644
--- a/nova/virt/powervm/lpar.py
+++ b/nova/virt/powervm/lpar.py
@@ -49,7 +49,11 @@ def load_from_conf_data(conf_data):
attribs = dict(item.split("=") for item in list(cf_splitter))
lpar = LPAR()
for (key, value) in attribs.items():
- lpar[key] = value
+ try:
+ lpar[key] = value
+ except exception.PowerVMLPARAttributeNotFound, e:
+ LOG.info(_('Encountered unknown LPAR attribute: %s\n'
+ 'Continuing without storing') % key)
return lpar
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index c5c2b5f04..43fa27160 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -207,6 +207,16 @@ class PowerVMOperator(object):
# MAC address for the mac_base_value parameter and then
# get the integer value of the final 2 characters as the
# slot_id parameter
+ #
+ # NOTE(mjfork) the slot_id should not exceed 255 (FF) to
+ avoid spilling over into the next
+ # highest octet. The contract with
+ # macs_for_instance limits to a value between
+ # 32 and 63 inclusive so we are safe.
+ #
+ # Further, with the contract on slot_id, we
+ # can hard code max_virtual_slots to 64 in
+ # LPAR definition.
mac = network_info[0]['address']
mac_base_value = (mac[:-2]).replace(':', '')
eth_id = self._operator.get_virtual_eth_adapter_id()
@@ -215,8 +225,6 @@ class PowerVMOperator(object):
locals())
# LPAR configuration data
- # max_virtual_slots is hardcoded to 64 since we generate a MAC
- # address that must be placed in slots 32 - 64
lpar_inst = LPAR.LPAR(
name=inst_name, lpar_env='aixlinux',
min_mem=mem_min, desired_mem=mem,
@@ -233,7 +241,7 @@ class PowerVMOperator(object):
LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
self._operator.create_lpar(lpar_inst)
#TODO(mjfork) capture the error and handle the error when the MAC
- # prefix already exists on the system (1 in 2^28)
+ # prefix already exists on the system (1 in 2^32)
except nova_exception.ProcessExecutionError:
LOG.exception(_("LPAR instance '%s' creation failed") %
instance['name'])
@@ -258,7 +266,16 @@ class PowerVMOperator(object):
spawn_start = time.time()
try:
- _create_lpar_instance(instance)
+ try:
+ host_stats = self.get_host_stats(refresh=True)
+ lpar_inst = self._create_lpar_instance(instance, host_stats)
+ self._operator.create_lpar(lpar_inst)
+ LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
+ except nova_exception.ProcessExecutionError:
+ LOG.exception(_("LPAR instance '%s' creation failed") %
+ instance['name'])
+ raise exception.PowerVMLPARCreationFailed()
+
_create_image(context, instance, image_id)
LOG.debug(_("Activating the LPAR instance '%s'")
% instance['name'])
@@ -365,6 +382,118 @@ class PowerVMOperator(object):
def macs_for_instance(self, instance):
return self._operator.macs_for_instance(instance)
+ def _create_lpar_instance(self, instance, host_stats=None):
+ inst_name = instance['name']
+
+ # CPU/Memory min and max can be configurable. Lets assume
+ # some default values for now.
+
+ # Memory
+ mem = instance['memory_mb']
+ if host_stats and mem > host_stats['host_memory_free']:
+ LOG.error(_('Not enough free memory in the host'))
+ raise exception.PowerVMInsufficientFreeMemory(
+ instance_name=instance['name'])
+ mem_min = min(mem, constants.POWERVM_MIN_MEM)
+ mem_max = mem + constants.POWERVM_MAX_MEM
+
+ # CPU
+ cpus = instance['vcpus']
+ if host_stats:
+ avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
+ if cpus > avail_cpus:
+ LOG.error(_('Insufficient available CPU on PowerVM'))
+ raise exception.PowerVMInsufficientCPU(
+ instance_name=instance['name'])
+ cpus_min = min(cpus, constants.POWERVM_MIN_CPUS)
+ cpus_max = cpus + constants.POWERVM_MAX_CPUS
+ cpus_units_min = decimal.Decimal(cpus_min) / decimal.Decimal(10)
+ cpus_units = decimal.Decimal(cpus) / decimal.Decimal(10)
+
+ # Network
+ eth_id = self._operator.get_virtual_eth_adapter_id()
+
+ # LPAR configuration data
+ lpar_inst = LPAR.LPAR(
+ name=inst_name, lpar_env='aixlinux',
+ min_mem=mem_min, desired_mem=mem,
+ max_mem=mem_max, proc_mode='shared',
+ sharing_mode='uncap', min_procs=cpus_min,
+ desired_procs=cpus, max_procs=cpus_max,
+ min_proc_units=cpus_units_min,
+ desired_proc_units=cpus_units,
+ max_proc_units=cpus_max,
+ virtual_eth_adapters='4/0/%s//0/0' % eth_id)
+ return lpar_inst
+
+ def _check_host_resources(self, instance, vcpus, mem, host_stats):
+ """Checks resources on host for resize, migrate, and spawn
+ :param vcpus: CPUs to be used
+ :param mem: memory requested by instance
+ :param host_stats: dictionary of host resource statistics
+ """
+ if mem > host_stats['host_memory_free']:
+ LOG.exception(_('Not enough free memory in the host'))
+ raise exception.PowerVMInsufficientFreeMemory(
+ instance_name=instance['name'])
+
+ avail_cpus = host_stats['vcpus'] - host_stats['vcpus_used']
+ if vcpus > avail_cpus:
+ LOG.exception(_('Insufficient available CPU on PowerVM'))
+ raise exception.PowerVMInsufficientCPU(
+ instance_name=instance['name'])
+
+ def migrate_disk(self, device_name, src_host, dest, image_path,
+ instance_name=None):
+ """Migrates SVC or Logical Volume based disks
+
+ :param device_name: disk device name in /dev/
+ :param dest: IP or DNS name of destination host/VIOS
+ :param image_path: path on source and destination to directory
+ for storing image files
+ :param instance_name: name of instance being migrated
+ :returns: disk_info dictionary object describing root volume
+ information used for locating/mounting the volume
+ """
+ dest_file_path = self._disk_adapter.migrate_volume(
+ device_name, src_host, dest, image_path, instance_name)
+ disk_info = {}
+ disk_info['root_disk_file'] = dest_file_path
+ return disk_info
+
+ def deploy_from_migrated_file(self, lpar, file_path, size):
+ # decompress file
+ gzip_ending = '.gz'
+ if file_path.endswith(gzip_ending):
+ raw_file_path = file_path[:-len(gzip_ending)]
+ else:
+ raw_file_path = file_path
+
+ self._operator._decompress_image_file(file_path, raw_file_path)
+
+ try:
+ # deploy lpar from file
+ self._deploy_from_vios_file(lpar, raw_file_path, size)
+ finally:
+ # cleanup migrated file
+ self._operator._remove_file(raw_file_path)
+
+ def _deploy_from_vios_file(self, lpar, file_path, size):
+ self._operator.create_lpar(lpar)
+ lpar = self._operator.get_lpar(lpar['name'])
+ instance_id = lpar['lpar_id']
+ vhost = self._operator.get_vhost_by_instance_id(instance_id)
+
+ # Create logical volume on IVM
+ diskName = self._disk_adapter._create_logical_volume(size)
+ # Attach the disk to LPAR
+ self._operator.attach_disk_to_vhost(diskName, vhost)
+
+ # Copy file to device
+ self._disk_adapter._copy_file_to_device(file_path, diskName)
+
+ self._operator.start_lpar(lpar['name'])
+
class BaseOperator(object):
"""Base operator for IVM and HMC managed systems."""
@@ -596,6 +725,89 @@ class BaseOperator(object):
def macs_for_instance(self, instance):
pass
+ def update_lpar(self, lpar_info):
+ """Resizing an LPAR
+
+ :param lpar_info: dictionary of LPAR information
+ """
+ configuration_data = ('name=%s,min_mem=%s,desired_mem=%s,'
+ 'max_mem=%s,min_procs=%s,desired_procs=%s,'
+ 'max_procs=%s,min_proc_units=%s,'
+ 'desired_proc_units=%s,max_proc_units=%s' %
+ (lpar_info['name'], lpar_info['min_mem'],
+ lpar_info['desired_mem'],
+ lpar_info['max_mem'],
+ lpar_info['min_procs'],
+ lpar_info['desired_procs'],
+ lpar_info['max_procs'],
+ lpar_info['min_proc_units'],
+ lpar_info['desired_proc_units'],
+ lpar_info['max_proc_units']))
+
+ self.run_vios_command(self.command.chsyscfg('-r prof -i "%s"' %
+ configuration_data))
+
+ def get_logical_vol_size(self, diskname):
+ """Finds and calculates the logical volume size in GB
+
+ :param diskname: name of the logical volume
+ :returns: size of logical volume in GB
+ """
+ configuration_data = ("ioscli lslv %s -fmt : -field pps ppsize" %
+ diskname)
+ output = self.run_vios_command(configuration_data)
+ pps, ppsize = output[0].split(':')
+ ppsize = re.findall(r'\d+', ppsize)
+ ppsize = int(ppsize[0])
+ pps = int(pps)
+ lv_size = ((pps * ppsize) / 1024)
+
+ return lv_size
+
+ def rename_lpar(self, instance_name, new_name):
+ """Rename LPAR given by instance_name to new_name
+
+ Note: For IVM based deployments, the name is
+ limited to 31 characters and will be trimmed
+ to meet this requirement
+
+ :param instance_name: name of LPAR to be renamed
+ :param new_name: desired new name of LPAR
+ :returns: new name of renamed LPAR trimmed to 31 characters
+ if necessary
+ """
+
+ # grab first 31 characters of new name
+ new_name_trimmed = new_name[:31]
+
+ cmd = ''.join(['chsyscfg -r lpar -i ',
+ '"',
+ 'name=%s,' % instance_name,
+ 'new_name=%s' % new_name_trimmed,
+ '"'])
+
+ self.run_vios_command(cmd)
+
+ return new_name_trimmed
+
+ def _decompress_image_file(self, file_path, outfile_path):
+ command = "/usr/bin/gunzip -c %s > %s" % (file_path, outfile_path)
+ output = self.run_vios_command_as_root(command)
+
+ # Remove compressed image file
+ command = "/usr/bin/rm %s" % file_path
+ output = self.run_vios_command_as_root(command)
+
+ return outfile_path
+
+ def _remove_file(self, file_path):
+ """Removes a file on the VIOS partition
+
+ :param file_path: absolute path to file to be removed
+ """
+ command = 'rm %s' % file_path
+ self.run_vios_command_as_root(command)
+
class IVMOperator(BaseOperator):
"""Integrated Virtualization Manager (IVM) Operator.
@@ -617,18 +829,21 @@ class IVMOperator(BaseOperator):
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
# NOTE(mjfork): For IVM-based PowerVM, we cannot directly set a MAC
# address on an LPAR, but rather need to construct one
- # that can be used. Retain the 0xfe as noted above,
- # but ensure the final 3 hex values represent a value
+ # that can be used. Retain the 0xfa as noted above,
+ # but ensure the final 2 hex values represent a value
# between 32 and 64 so we can assign as the slot id on
- # the system.
- # FA:xx:xx:xx:x0:[32-64]
+ the system.  For future reference, the last octet
+ should not exceed FF (255) since it would spill over
+ into the higher-order octet.
+ #
+ # FA:xx:xx:xx:xx:[32-64]
macs = set()
mac_base = [0xfa,
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
- random.randint(0x00, 0xff) & 0xf0,
+ random.randint(0x00, 0xff),
random.randint(0x00, 0x00)]
for n in range(32, 64):
mac_base[5] = n
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 19f984c7d..63d9e3c57 100755
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -164,7 +164,7 @@ class VMwareESXDriver(driver.ComputeDriver):
pass
def legacy_nwinfo(self):
- return True
+ return False
def list_instances(self):
"""List VM instances."""
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index 137045508..e2dfa0427 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -36,10 +36,10 @@ vmwareapi_vif_opts = [
CONF.register_opts(vmwareapi_vif_opts)
-def ensure_vlan_bridge(self, session, network, cluster=None):
+def ensure_vlan_bridge(self, session, vif, cluster=None):
"""Create a vlan and bridge unless they already exist."""
- vlan_num = network['vlan']
- bridge = network['bridge']
+ vlan_num = vif['network'].get_meta('vlan')
+ bridge = vif['network']['bridge']
vlan_interface = CONF.vmwareapi_vlan_interface
# Check if the vlan_interface physical network adapter exists on the
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index c1015cb13..bc3bd8a9c 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -73,6 +73,25 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
device_config_spec = vif_spec_list
config_spec.deviceChange = device_config_spec
+
+ # add vm-uuid and iface-id.x values for Quantum
+ extra_config = []
+ opt = client_factory.create('ns0:OptionValue')
+ opt.key = "nvp.vm-uuid"
+ opt.value = instance['uuid']
+ extra_config.append(opt)
+
+ i = 0
+ for vif_info in vif_infos:
+ if vif_info['iface_id']:
+ opt = client_factory.create('ns0:OptionValue')
+ opt.key = "nvp.iface-id.%d" % i
+ opt.value = vif_info['iface_id']
+ extra_config.append(opt)
+ i += 1
+
+ config_spec.extraConfig = extra_config
+
return config_spec
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 106de0cb0..0aeb58ea3 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -176,19 +176,20 @@ class VMwareVMOps(object):
vif_infos = []
if network_info is None:
return vif_infos
- for (network, mapping) in network_info:
- mac_address = mapping['mac']
- network_name = network['bridge'] or \
+ for vif in network_info:
+ mac_address = vif['address']
+ network_name = vif['network']['bridge'] or \
CONF.vmware.integration_bridge
- if mapping.get('should_create_vlan'):
+ if vif['network'].get_meta('should_create_vlan', False):
network_ref = vmwarevif.ensure_vlan_bridge(
- self._session, network,
+ self._session, vif,
self._cluster)
else:
network_ref = _check_if_network_bridge_exists(network_name)
vif_infos.append({'network_name': network_name,
'mac_address': mac_address,
'network_ref': network_ref,
+ 'iface_id': vif.get_meta('iface_id'),
})
return vif_infos
@@ -1126,6 +1127,33 @@ class VMwareVMOps(object):
return port
+ @staticmethod
+ def _get_machine_id_str(network_info):
+ machine_id_str = ''
+ for vif in network_info:
+ # TODO(vish): add support for dns2
+ # TODO(sateesh): add support for injection of ipv6 configuration
+ network = vif['network']
+ ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
+ subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
+ if len(subnets_v4[0]['ips']) > 0:
+ ip_v4 = subnets_v4[0]['ips'][0]
+ if len(subnets_v4[0]['dns']) > 0:
+ dns = subnets_v4[0]['dns'][0]['address']
+
+ netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
+ gateway_v4 = subnets_v4[0]['gateway']['address']
+ broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
+
+ interface_str = ";".join([vif['address'],
+ ip_v4 and ip_v4['address'] or '',
+ netmask_v4 or '',
+ gateway_v4 or '',
+ broadcast_v4 or '',
+ dns or ''])
+ machine_id_str = machine_id_str + interface_str + '#'
+ return machine_id_str
+
def _set_machine_id(self, client_factory, instance, network_info):
"""
Set the machine id of the VM for guest tools to pick up and reconfigure
@@ -1135,40 +1163,17 @@ class VMwareVMOps(object):
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['uuid'])
- machine_id_str = ''
- for (network, info) in network_info:
- # TODO(vish): add support for dns2
- # TODO(sateesh): add support for injection of ipv6 configuration
- ip_v4 = ip_v6 = None
- if 'ips' in info and len(info['ips']) > 0:
- ip_v4 = info['ips'][0]
- if 'ip6s' in info and len(info['ip6s']) > 0:
- ip_v6 = info['ip6s'][0]
- if len(info['dns']) > 0:
- dns = info['dns'][0]
- else:
- dns = ''
-
- interface_str = ";".join([info['mac'],
- ip_v4 and ip_v4['ip'] or '',
- ip_v4 and ip_v4['netmask'] or '',
- info['gateway'],
- info['broadcast'],
- dns])
- machine_id_str = machine_id_str + interface_str + '#'
-
machine_id_change_spec = vm_util.get_machine_id_change_spec(
- client_factory, machine_id_str)
+ client_factory,
+ self._get_machine_id_str(network_info))
- LOG.debug(_("Reconfiguring VM instance to set the machine id "
- "with ip - %(ip_addr)s") % {'ip_addr': ip_v4['ip']},
+ LOG.debug(_("Reconfiguring VM instance to set the machine id"),
instance=instance)
reconfig_task = self._session._call_method(self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=machine_id_change_spec)
self._session._wait_for_task(instance['uuid'], reconfig_task)
- LOG.debug(_("Reconfigured VM instance to set the machine id "
- "with ip - %(ip_addr)s") % {'ip_addr': ip_v4['ip']},
+ LOG.debug(_("Reconfigured VM instance to set the machine id"),
instance=instance)
def _set_vnc_config(self, client_factory, instance, port, password):
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index c1a578f3b..6a1cdd6b9 100755
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -452,8 +452,8 @@ class XenAPIDriver(driver.ComputeDriver):
:param dest_check_data: result of check_can_live_migrate_destination
includes the block_migration flag
"""
- self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
- dest_check_data)
+ return self._vmops.check_can_live_migrate_source(ctxt, instance_ref,
+ dest_check_data)
def get_instance_disk_info(self, instance_name):
"""Used by libvirt for live migration. We rely on xenapi
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ec6450d9f..3bfe6de09 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -230,8 +230,7 @@ def create_vm(session, instance, name_label, kernel, ramdisk,
'memory_target': mem,
'name_description': '',
'name_label': name_label,
- 'other_config': {'allowvssprovider': str(False),
- 'nova_uuid': str(instance['uuid'])},
+ 'other_config': {'nova_uuid': str(instance['uuid'])},
'PCI_bus': '',
'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
'viridian': 'true', 'timeoffset': '0'},
@@ -247,7 +246,7 @@ def create_vm(session, instance, name_label, kernel, ramdisk,
'VCPUs_at_startup': vcpus,
'VCPUs_max': vcpus,
'VCPUs_params': {},
- 'xenstore_data': {}}
+ 'xenstore_data': {'allowvssprovider': 'false'}}
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
@@ -483,7 +482,7 @@ def get_vdi_uuid_for_volume(session, connection_data):
vdi_uuid = vdi_rec['uuid']
except volume_utils.StorageError, exc:
LOG.exception(exc)
- volume_utils.forget_sr(session, sr_uuid)
+ volume_utils.forget_sr(session, sr_ref)
return vdi_uuid
@@ -2084,6 +2083,7 @@ def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096):
break
data = src.read(min(block_size, left))
+ greenthread.sleep(0)
duration = time.time() - start_time
compression_pct = float(skipped_bytes) / bytes_read * 100
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 5fca96817..5dbadc416 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1640,6 +1640,7 @@ class VMOps(object):
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
+ return dest_check_data
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('VM.assert_can_migrate'
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 7921e3e87..40451a48e 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright (c) 2013 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -22,7 +23,6 @@ and storage repositories
import re
import string
-from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -38,94 +38,50 @@ class StorageError(Exception):
super(StorageError, self).__init__(message)
-def create_sr(session, label, params):
- LOG.debug(_("creating sr within volume_utils"))
- type = params['sr_type']
- del params['sr_type']
- LOG.debug(_('type is = %s') % type)
- if 'name_description' in params:
- desc = params['name_description']
- LOG.debug(_('name = %s') % desc)
- del params['name_description']
- else:
- desc = ''
+def _handle_sr_params(params):
if 'id' in params:
del params['id']
- LOG.debug(params)
- try:
- sr_ref = session.call_xenapi("SR.create",
- session.get_xenapi_host(),
- params,
- '0', label, desc, type, '', False, {})
- LOG.debug(_('Created %(label)s as %(sr_ref)s.') % locals())
- return sr_ref
+ sr_type = params.pop('sr_type', 'iscsi')
+ sr_desc = params.pop('name_description', '')
+ return sr_type, sr_desc
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to create Storage Repository'))
+
+def create_sr(session, label, params):
+ LOG.debug(_('Creating SR %(label)s') % locals())
+ sr_type, sr_desc = _handle_sr_params(params)
+ sr_ref = session.call_xenapi("SR.create",
+ session.get_xenapi_host(),
+ params,
+ '0', label, sr_desc, sr_type, '', False, {})
+ return sr_ref
def introduce_sr(session, sr_uuid, label, params):
- LOG.debug(_("introducing sr within volume_utils"))
- # If the sr_type is missing, we assume we are
- # using the default iscsi back-end
- type = params.pop('sr_type', 'iscsi')
- LOG.debug(_('type is = %s') % type)
- if 'name_description' in params:
- desc = params['name_description']
- LOG.debug(_('name = %s') % desc)
- del params['name_description']
- else:
- desc = ''
- if 'id' in params:
- del params['id']
- LOG.debug(params)
+ LOG.debug(_('Introducing SR %(label)s') % locals())
- try:
- sr_ref = session.call_xenapi("SR.introduce",
- sr_uuid,
- label,
- desc,
- type,
- '',
- False,
- params,)
- LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
-
- #Create pbd
- LOG.debug(_('Creating pbd for SR'))
- pbd_ref = create_pbd(session, sr_ref, params)
- LOG.debug(_('Plugging SR'))
- #Plug pbd
- session.call_xenapi("PBD.plug", pbd_ref)
- session.call_xenapi("SR.scan", sr_ref)
- return sr_ref
+ sr_type, sr_desc = _handle_sr_params(params)
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to introduce Storage Repository'))
+ sr_ref = session.call_xenapi('SR.introduce', sr_uuid, label, sr_desc,
+ sr_type, '', False, params)
+ LOG.debug(_('Creating PBD for SR'))
+ pbd_ref = create_pbd(session, sr_ref, params)
-def forget_sr(session, sr_uuid):
- """
- Forgets the storage repository without destroying the VDIs within
- """
- try:
- sr_ref = session.call_xenapi("SR.get_by_uuid", sr_uuid)
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to get SR using uuid'))
+ LOG.debug(_('Plugging SR'))
+ session.call_xenapi("PBD.plug", pbd_ref)
- LOG.debug(_('Forgetting SR %s...') % sr_ref)
+ session.call_xenapi("SR.scan", sr_ref)
+ return sr_ref
- try:
- unplug_pbds(session, sr_ref)
- sr_ref = session.call_xenapi("SR.forget", sr_ref)
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to forget Storage Repository'))
+def forget_sr(session, sr_ref):
+ """
+ Forgets the storage repository without destroying the VDIs within
+ """
+ LOG.debug(_('Forgetting SR...'))
+ unplug_pbds(session, sr_ref)
+ session.call_xenapi("SR.forget", sr_ref)
def find_sr_by_uuid(session, sr_uuid):
@@ -138,35 +94,6 @@ def find_sr_by_uuid(session, sr_uuid):
return None
-def create_iscsi_storage(session, info, label, description):
- """
- Create an iSCSI storage repository that will be used to mount
- the volume for the specified instance
- """
- sr_ref = session.call_xenapi("SR.get_by_name_label", label)
- if len(sr_ref) == 0:
- LOG.debug(_('Introducing %s...'), label)
- record = {}
- if 'chapuser' in info and 'chappassword' in info:
- record = {'target': info['targetHost'],
- 'port': info['targetPort'],
- 'targetIQN': info['targetIQN'],
- 'chapuser': info['chapuser'],
- 'chappassword': info['chappassword']}
- else:
- record = {'target': info['targetHost'],
- 'port': info['targetPort'],
- 'targetIQN': info['targetIQN']}
- try:
- LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
- return sr_ref
- except session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise StorageError(_('Unable to create Storage Repository'))
- else:
- return sr_ref[0]
-
-
def find_sr_from_vbd(session, vbd_ref):
"""Find the SR reference from the VBD reference."""
try:
@@ -188,18 +115,19 @@ def create_pbd(session, sr_ref, params):
def unplug_pbds(session, sr_ref):
- pbds = []
try:
pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
except session.XenAPI.Failure, exc:
LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
- ' for %(sr_ref)s') % locals())
+ ' for %(sr_ref)s') % locals())
+ return
+
for pbd in pbds:
try:
session.call_xenapi("PBD.unplug", pbd)
except session.XenAPI.Failure, exc:
LOG.warn(_('Ignoring exception %(exc)s when unplugging'
- ' PBD %(pbd)s') % locals())
+ ' PBD %(pbd)s') % locals())
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
@@ -257,24 +185,15 @@ def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
def purge_sr(session, sr_ref):
- try:
- sr_rec = session.call_xenapi("SR.get_record", sr_ref)
- vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
- except StorageError, ex:
- LOG.exception(ex)
- raise StorageError(_('Error finding vdis in SR %s') % sr_ref)
-
+ # Make sure no VBDs are referencing the SR VDIs
+ vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
for vdi_ref in vdi_refs:
- try:
- vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
- except StorageError, ex:
- LOG.exception(ex)
- raise StorageError(_('Unable to find vbd for vdi %s') %
- vdi_ref)
- if len(vbd_refs) > 0:
+ vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
+ if vbd_refs:
+ LOG.warn(_('Cannot purge SR with referenced VDIs'))
return
- forget_sr(session, sr_rec['uuid'])
+ forget_sr(session, sr_ref)
def get_device_number(mountpoint):
@@ -382,28 +301,3 @@ def _get_target_port(iscsi_string):
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
return CONF.target_port
-
-
-def introduce_sr_unless_present(session, sr_uuid, label, params):
- LOG.debug(_("Introducing SR %s") % label)
- sr_ref = find_sr_by_uuid(session, sr_uuid)
- if sr_ref:
- LOG.debug(_('SR found in xapi database. No need to introduce'))
- return sr_ref
- sr_ref = introduce_sr(session, sr_uuid, label, params)
-
- if sr_ref is None:
- raise exception.NovaException(_('Could not introduce SR'))
- return sr_ref
-
-
-def forget_sr_if_present(session, sr_uuid):
- sr_ref = find_sr_by_uuid(session, sr_uuid)
- if sr_ref is None:
- LOG.debug(_('SR %s not found in the xapi database') % sr_uuid)
- return
- try:
- forget_sr(session, sr_uuid)
- except StorageError, exc:
- LOG.exception(exc)
- raise exception.NovaException(_('Could not forget SR'))
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 0c8a9e1c7..88119e10d 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright (c) 2013 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -59,59 +60,42 @@ class VolumeOps(object):
' instance %(instance_name)s') % locals())
def _connect_volume(self, connection_data, dev_number, instance_name,
- vm_ref, hotplug=True):
+ vm_ref, hotplug=True):
+ sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
+ connection_data, 'Disk-for:%s' % instance_name)
- description = 'Disk-for:%s' % instance_name
- uuid, label, sr_params = volume_utils.parse_sr_info(connection_data,
- description)
-
- # Introduce SR
- try:
- sr_ref = volume_utils.introduce_sr_unless_present(
- self._session, uuid, label, sr_params)
- LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
- except self._session.XenAPI.Failure, exc:
- LOG.exception(exc)
- raise volume_utils.StorageError(
- _('Unable to introduce Storage Repository'))
-
- vdi_uuid = None
- target_lun = None
- if 'vdi_uuid' in connection_data:
- vdi_uuid = connection_data['vdi_uuid']
- elif 'target_lun' in connection_data:
- target_lun = connection_data['target_lun']
- else:
- vdi_uuid = None
-
- # Introduce VDI and attach VBD to VM
- try:
- vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref,
- vdi_uuid, target_lun)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- volume_utils.forget_sr_if_present(self._session, uuid)
- raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
- ' instance %(instance_name)s') % locals())
+ # Introduce SR if not already present
+ sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
+ if not sr_ref:
+ sr_ref = volume_utils.introduce_sr(
+ self._session, sr_uuid, sr_label, sr_params)
try:
+ # Introduce VDI
+ if 'vdi_uuid' in connection_data:
+ vdi_ref = volume_utils.introduce_vdi(
+ self._session, sr_ref,
+ vdi_uuid=connection_data['vdi_uuid'])
+ elif 'target_lun' in connection_data:
+ vdi_ref = volume_utils.introduce_vdi(
+ self._session, sr_ref,
+ target_lun=connection_data['target_lun'])
+ else:
+ # NOTE(sirp): This will introduce the first VDI in the SR
+ vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)
+
+ # Attach
vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
dev_number, bootable=False,
osvol=True)
- except self._session.XenAPI.Failure, exc:
- LOG.exception(exc)
- volume_utils.forget_sr_if_present(self._session, uuid)
- raise Exception(_('Unable to use SR %(sr_ref)s for'
- ' instance %(instance_name)s') % locals())
-
- if hotplug:
- try:
+
+ if hotplug:
self._session.call_xenapi("VBD.plug", vbd_ref)
- except self._session.XenAPI.Failure, exc:
- LOG.exception(exc)
- volume_utils.forget_sr_if_present(self._session, uuid)
- raise Exception(_('Unable to attach volume to instance %s')
- % instance_name)
+ except Exception:
+ # NOTE(sirp): Forgetting the SR will have the effect of cleaning up
+ # the VDI and VBD records, so no need to handle that explicitly.
+ volume_utils.forget_sr(self._session, sr_ref)
+ raise
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
@@ -121,8 +105,15 @@ class VolumeOps(object):
device_number = volume_utils.get_device_number(mountpoint)
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
- vbd_ref = vm_utils.find_vbd_by_number(
- self._session, vm_ref, device_number)
+ try:
+ vbd_ref = vm_utils.find_vbd_by_number(
+ self._session, vm_ref, device_number)
+ except volume_utils.StorageError:
+ # NOTE(sirp): If we don't find the VBD then it must have been
+ # detached previously.
+ LOG.warn(_('Skipping detach because VBD for %(instance_name)s was'
+ ' not found') % locals())
+ return
# Unplug VBD if we're NOT shutdown
unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 05918f83d..b58e63011 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -51,6 +51,10 @@ cinder_opts = [
cfg.BoolOpt('cinder_api_insecure',
default=False,
help='Allow to perform insecure SSL requests to cinder'),
+ cfg.BoolOpt('cinder_cross_az_attach',
+ default=True,
+ help='Allow attach between instance and volume in different '
+ 'availability zones.'),
]
CONF = cfg.CONF
@@ -195,7 +199,7 @@ class API(base.Base):
return rval
- def check_attach(self, context, volume):
+ def check_attach(self, context, volume, instance=None):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be available")
@@ -203,6 +207,10 @@ class API(base.Base):
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
+ if instance and not CONF.cinder_cross_az_attach:
+ if instance['availability_zone'] != volume['availability_zone']:
+ msg = _("Instance and volume not in same availability_zone")
+ raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
diff --git a/openstack-common.conf b/openstack-common.conf
index 29ed9d82f..fcff378b5 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,cliutils,context,db,db.sqlalchemy,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils,install_venv_common,flakes
+modules=cfg,cliutils,context,db,db.sqlalchemy,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils,install_venv_common,flakes,version
# The base module to hold the copy of openstack.common
base=nova
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel b/plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel
index 9ce6902d7..32e253cde 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/kernel
@@ -116,7 +116,7 @@ def _remove_file(filepath):
def remove_kernel_ramdisk(session, args):
- """Removes kernel and/or ramdisk from dom0's file system"""
+ """Removes kernel and/or ramdisk from dom0's file system."""
kernel_file = optional(args, 'kernel-file')
ramdisk_file = optional(args, 'ramdisk-file')
if kernel_file:
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
index b9e9da2e2..4b6bf8811 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
@@ -25,7 +25,7 @@ configure_logging('migration')
def move_vhds_into_sr(session, instance_uuid, sr_path, uuid_stack):
- """Moves the VHDs from their copied location to the SR"""
+ """Moves the VHDs from their copied location to the SR."""
staging_path = "/images/instance%s" % instance_uuid
imported_vhds = utils.import_vhds(sr_path, staging_path, uuid_stack)
utils.cleanup_staging_area(staging_path)
@@ -47,7 +47,7 @@ def _rsync_vhds(instance_uuid, host, staging_path, user="root"):
def transfer_vhd(session, instance_uuid, host, vdi_uuid, sr_path, seq_num):
- """Rsyncs a VHD to an adjacent host"""
+ """Rsyncs a VHD to an adjacent host."""
staging_path = utils.make_staging_area(sr_path)
try:
utils.prepare_staging_area(
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
index 4d2193908..0319af4d2 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -33,9 +33,9 @@ import subprocess
import tempfile
import time
+import pluginlib_nova as pluginlib
import XenAPI
import XenAPIPlugin
-import pluginlib_nova as pluginlib
pluginlib.configure_logging("xenhost")
diff --git a/run_tests.sh b/run_tests.sh
index be9b0fa73..5a76b514f 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -12,6 +12,7 @@ function usage {
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
+ echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
@@ -33,7 +34,6 @@ function usage {
function process_options {
i=1
while [ $i -le $# ]; do
- FOO=${!i}
case "${!i}" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
@@ -42,6 +42,7 @@ function process_options {
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
-f|--force) force=1;;
+ -u|--update) update=1;;
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
@@ -57,8 +58,8 @@ function process_options {
(( i++ ))
tools_path=${!i}
;;
- -*) testropts="$testropts $1";;
- *) testrargs="$testrargs $1"
+ -*) testropts="$testropts ${!i}";;
+ *) testrargs="$testrargs ${!i}"
esac
(( i++ ))
done
@@ -80,6 +81,7 @@ just_pep8=0
no_pep8=0
coverage=0
recreate_db=1
+update=0
LANG=en_US.UTF-8
LANGUAGE=en_US:en
@@ -147,40 +149,7 @@ function copy_subunit_log {
function run_pep8 {
echo "Running PEP8 and HACKING compliance check..."
-
- # Files of interest
- # NOTE(lzyeval): Avoid selecting nova-api-paste.ini and nova.conf in nova/bin
- # when running on devstack.
- # NOTE(lzyeval): Avoid selecting *.pyc files to reduce pep8 check-up time
- # when running on devstack.
- srcfiles=`find nova -type f -name "*.py" ! -wholename "nova\/openstack*"`
- srcfiles+=" `find bin -type f ! -name "nova.conf*" ! -name "*api-paste.ini*" ! -name "*~"`"
- srcfiles+=" `find tools -type f -name "*.py"`"
- srcfiles+=" `find plugins -type f -name "*.py"`"
- srcfiles+=" `find smoketests -type f -name "*.py"`"
- srcfiles+=" setup.py"
-
- # Until all these issues get fixed, ignore.
- ignore='--ignore=E12,E711,E721,E712,N403,N404'
-
- # First run the hacking selftest, to make sure it's right
- echo "Running hacking.py self test"
- ${wrapper} python tools/hacking.py --doctest
-
- # Then actually run it
- echo "Running pep8"
- ${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
-
- ${wrapper} bash tools/unused_imports.sh
- # NOTE(sdague): as of grizzly-2 these are passing however leaving the comment
- # in here in case we need to break it out when we get more of our hacking working
- # again.
- #
- # NOTE(sirp): Dom0 plugins are written for Python 2.4, meaning some HACKING
- # checks are too strict.
- # pep8onlyfiles=`find plugins -type f -name "*.py"`
- # pep8onlyfiles+=" `find plugins/xenserver/xenapi/etc/xapi.d/plugins/ -type f -perm +111`"
- # ${wrapper} pep8 ${ignore} ${pep8onlyfiles}
+ bash tools/run_pep8.sh
}
@@ -193,6 +162,10 @@ then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
+ if [ $update -eq 1 ]; then
+ echo "Updating virtualenv..."
+ python tools/install_venv.py $installvenvopts
+ fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
diff --git a/setup.py b/setup.py
index 78c9062c2..54d62e409 100644
--- a/setup.py
+++ b/setup.py
@@ -18,12 +18,13 @@
import setuptools
from nova.openstack.common import setup as common_setup
-from nova import version
requires = common_setup.parse_requirements()
+project = 'nova'
-setuptools.setup(name='nova',
- version=version.canonical_version_string(),
+setuptools.setup(
+ name=project,
+ version=common_setup.get_version(project, '2013.1'),
description='cloud computing fabric controller',
author='OpenStack',
author_email='nova@lists.launchpad.net',
diff --git a/tools/hacking.py b/tools/hacking.py
index 42a644e7d..d5853d591 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -21,6 +21,7 @@
Built on top of pep8.py
"""
+import imp
import inspect
import logging
import os
@@ -45,7 +46,9 @@ logging.disable('LOG')
#N8xx git commit messages
#N9xx other
-IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
+IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session',
+ 'nova.openstack.common.log.logging',
+ 'nova.db.sqlalchemy.migration.versioning_api']
START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"]
END_DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = os.getenv('HACKING_VERBOSE_MISSING_IMPORT', 'False')
@@ -150,111 +153,116 @@ def nova_except_format_assert(logical_line):
yield 1, "N202: assertRaises Exception too broad"
-def nova_one_import_per_line(logical_line):
- r"""Check for import format.
+modules_cache = dict((mod, True) for mod in tuple(sys.modules.keys())
+ + sys.builtin_module_names)
+
+RE_RELATIVE_IMPORT = re.compile('^from\s*[.]')
+
+
+def nova_import_rules(logical_line):
+ r"""Check for imports.
nova HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
- Okay: from nova.rpc.common import RemoteError
- N301: from nova.rpc.common import RemoteError, LOG
- """
- pos = logical_line.find(',')
- parts = logical_line.split()
- if (pos > -1 and (parts[0] == "import" or
- parts[0] == "from" and parts[2] == "import") and
- not is_import_exception(parts[1])):
- yield pos, "N301: one import per line"
+ Okay: from nova.compute import api
+ N301: from nova.compute import api, utils
-def nova_import_module_only(logical_line):
- r"""Check for import module only.
+ Imports should usually be on separate lines.
nova HACKING guide recommends importing only modules:
Do not import objects, only modules
+ Examples:
Okay: from os import path
Okay: import os.path
+ Okay: from nova.compute import rpcapi
N302: from os.path import dirname as dirname2
- N303 from os.path import *
- N304 import flakes
+ N303: from os.path import *
+ N304: from .compute import rpcapi
"""
- # N302 import only modules
- # N303 Invalid Import
- # N304 Relative Import
+ #NOTE(afazekas): An old style relative import example will not be able to
+ # pass the doctest, since the relativity depends on the file's locality
- # TODO(sdague) actually get these tests working
- # TODO(jogo) simplify this code
- def import_module_check(mod, parent=None, added=False):
- """Checks for relative, modules and invalid imports.
-
- If can't find module on first try, recursively check for relative
- imports.
+ def is_module_for_sure(mod, search_path=sys.path):
+ mod_path = mod.replace('.', os.sep)
+ try:
+ imp.find_module(mod_path, search_path)
+ except ImportError:
+ return False
+ return True
+
+ def is_module_for_sure_cached(mod):
+ if mod in modules_cache:
+ return modules_cache[mod]
+ res = is_module_for_sure(mod)
+ modules_cache[mod] = res
+ return res
+
+ def is_module(mod):
+ """Checks for non module imports.
+
+ If can't find module on first try, recursively check for the parent
+ modules.
When parsing 'from x import y,' x is the parent.
"""
- current_path = os.path.dirname(pep8.current_file)
- try:
- with warnings.catch_warnings():
- warnings.simplefilter('ignore', DeprecationWarning)
- valid = True
- if parent:
- parent_mod = __import__(parent, globals(), locals(),
- [mod], -1)
- valid = inspect.ismodule(getattr(parent_mod, mod))
- else:
- __import__(mod, globals(), locals(), [], -1)
- valid = inspect.ismodule(sys.modules[mod])
- if not valid:
- if added:
- sys.path.pop()
- added = False
- return logical_line.find(mod), ("N304: No "
- "relative imports. '%s' is a relative import"
- % logical_line)
- return logical_line.find(mod), ("N302: import only "
- "modules. '%s' does not import a module"
- % logical_line)
+ if is_module_for_sure_cached(mod):
+ return True
+ parts = mod.split('.')
+ for i in range(len(parts) - 1, 0, -1):
+ path = '.'.join(parts[0:i])
+ if is_module_for_sure_cached(path):
+ return False
+ _missingImport.add(mod)
+ return True
+
+ current_path = os.path.dirname(pep8.current_file)
+ current_mod = os.path.basename(pep8.current_file)
+ if current_mod[-3:] == ".py":
+ current_mod = current_mod[:-3]
- except (ImportError, NameError) as exc:
- if not added:
- added = True
- sys.path.append(current_path)
- return import_module_check(mod, parent, added)
- else:
- name = logical_line.split()[1]
- if name not in _missingImport:
- if VERBOSE_MISSING_IMPORT != 'False':
- print >> sys.stderr, ("ERROR: import '%s' in %s "
- "failed: %s" %
- (name, pep8.current_file, exc))
- _missingImport.add(name)
- added = False
- sys.path.pop()
- return
+ split_line = logical_line.split()
+ split_line_len = len(split_line)
+ if (split_line[0] in ('import', 'from') and split_line_len > 1 and
+ not is_import_exception(split_line[1])):
+ pos = logical_line.find(',')
+ if pos != -1:
+ if split_line[0] == 'from':
+ yield pos, "N301: one import per line"
+ return # ',' is not supported by the N302 checker yet
+ pos = logical_line.find('*')
+ if pos != -1:
+ yield pos, "N303: No wildcard (*) import."
+ return
+
+ if split_line_len in (2, 4, 6) and split_line[1] != "__future__":
+ if 'from' == split_line[0] and split_line_len > 3:
+ mod = '.'.join((split_line[1], split_line[3]))
+ if is_import_exception(mod):
+ return
+ if RE_RELATIVE_IMPORT.search(logical_line):
+ yield logical_line.find('.'), ("N304: No "
+ "relative imports. '%s' is a relative import"
+ % logical_line)
+ return
- except AttributeError:
- # Invalid import
- if "import *" in logical_line:
- # TODO(jogo): handle "from x import *, by checking all
- # "objects in x"
+ if not is_module(mod):
+ yield 0, ("N302: import only modules."
+ "'%s' does not import a module" % logical_line)
return
- return logical_line.find(mod), ("N303: Invalid import, "
- "%s" % mod)
- split_line = logical_line.split()
- if (", " not in logical_line and
- split_line[0] in ('import', 'from') and
- (len(split_line) in (2, 4, 6)) and
- split_line[1] != "__future__"):
- if is_import_exception(split_line[1]):
- return
- if "from" == split_line[0]:
- rval = import_module_check(split_line[3], parent=split_line[1])
- else:
- rval = import_module_check(split_line[1])
- if rval is not None:
- yield rval
+ #NOTE(afazekas): import searches first in the package
+ # The import keyword just imports modules
+ # The guestfs module now imports guestfs
+ mod = split_line[1]
+ if (current_mod != mod and
+ not is_module_for_sure_cached(mod) and
+ is_module_for_sure(mod, [current_path])):
+ yield 0, ("N304: No relative imports."
+ " '%s' is a relative import"
+ % logical_line)
#TODO(jogo): import template: N305
@@ -495,20 +503,34 @@ def nova_localization_strings(logical_line, tokens):
#TODO(jogo) Dict and list objects
+def nova_is_not(logical_line):
+ r"""Check localization in line.
+
+ Okay: if x is not y
+ N901: if not X is Y
+ N901: if not X.B is Y
+ """
+ split_line = logical_line.split()
+ if (len(split_line) == 5 and split_line[0] == 'if' and
+ split_line[1] == 'not' and split_line[3] == 'is'):
+ yield (logical_line.find('not'), "N901: Use the 'is not' "
+ "operator for when testing for unequal identities")
+
+
def nova_not_in(logical_line):
r"""Check localization in line.
Okay: if x not in y
Okay: if not (X in Y or X is Z)
Okay: if not (X in Y)
- N901: if not X in Y
- N901: if not X.B in Y
+ N902: if not X in Y
+ N902: if not X.B in Y
"""
split_line = logical_line.split()
if (len(split_line) == 5 and split_line[0] == 'if' and
split_line[1] == 'not' and split_line[3] == 'in' and not
split_line[2].startswith('(')):
- yield (logical_line.find('not'), "N901: Use the 'not in' "
+ yield (logical_line.find('not'), "N902: Use the 'not in' "
"operator for collection membership evaluation")
current_file = ""
diff --git a/tools/pip-requires b/tools/pip-requires
index 126f0125c..d7e48ff87 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -16,6 +16,7 @@ sqlalchemy-migrate>=0.7.2
netaddr
suds==0.4
paramiko
+pyasn1
Babel>=0.9.6
iso8601>=0.1.4
httplib2
diff --git a/tools/regression_tester.py b/tools/regression_tester.py
index 554260fbd..f21af61ba 100755
--- a/tools/regression_tester.py
+++ b/tools/regression_tester.py
@@ -10,8 +10,15 @@ import string
import subprocess
import sys
+gerrit_number = None
+
#TODO(jogo) use proper optParser
-gerrit_number = sys.argv[1]
+if len(sys.argv) == 2:
+ gerrit_number = sys.argv[1]
+else:
+ gerrit_number = None
+ print ("no gerrit review number specified, running on latest commit"
+ "on current branch.")
def run(cmd, fail_ok=False):
@@ -20,7 +27,7 @@ def run(cmd, fail_ok=False):
rval = subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError:
if not fail_ok:
- print "the above command terminated with an error"
+ print "The command above terminated with an error."
sys.exit(1)
pass
return rval
@@ -28,8 +35,10 @@ def run(cmd, fail_ok=False):
test_works = False
-original_branch = run("git rev-parse --abbrev-ref HEAD")
-run("git review -d %s" % gerrit_number)
+if gerrit_number:
+ original_branch = run("git rev-parse --abbrev-ref HEAD")
+ run("git review -d %s" % gerrit_number)
+
# run new tests with old code
run("git checkout HEAD^ nova")
run("git checkout HEAD nova/tests")
@@ -41,27 +50,32 @@ test_list = []
for test in tests:
test_list.append(string.replace(test[0:-3], '/', '.'))
-# run new tests, expect them to fail
-expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
- fail_ok=True)
-if "FAILED (id=" in expect_failure:
- test_works = True
+if test_list == []:
+ test_works = False
+ expect_failure = ""
+else:
+ # run new tests, expect them to fail
+ expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
+ fail_ok=True)
+ if "FAILED (id=" in expect_failure:
+ test_works = True
# cleanup
run("git checkout HEAD nova")
-new_branch = run("git status | head -1 | cut -d ' ' -f 4")
-run("git checkout %s" % original_branch)
-run("git branch -D %s" % new_branch)
+if gerrit_number:
+ new_branch = run("git status | head -1 | cut -d ' ' -f 4")
+ run("git checkout %s" % original_branch)
+ run("git branch -D %s" % new_branch)
if test_works:
print expect_failure
- print ""
- print "*******************************"
- print "SUCCESS: test covers regression"
+ print ""
+ print "*******************************"
+ print "FOUND a regression test"
else:
print expect_failure
print ""
- print "***************************************"
- print "FAILURE: test does not cover regression"
+ print "*******************************"
+ print "NO regression test"
sys.exit(1)
diff --git a/tools/run_pep8.sh b/tools/run_pep8.sh
new file mode 100755
index 000000000..4e7212e08
--- /dev/null
+++ b/tools/run_pep8.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# This is used by run_tests.sh and tox.ini
+python tools/hacking.py --doctest
+
+# Until all these issues get fixed, ignore.
+PEP8='python tools/hacking.py --ignore=E12,E711,E721,E712,N303,N403,N404'
+
+EXCLUDE='--exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*'
+EXCLUDE+=',*egg,build,./plugins/xenserver/networking/etc/xensource/scripts'
+EXCLUDE+=',./plugins/xenserver/xenapi/etc/xapi.d/plugins'
+${PEP8} ${EXCLUDE} .
+
+${PEP8} --filename=nova* bin
+
+SCRIPT_ROOT=$(echo $(dirname $(readlink -f "$0")) | sed s/\\/tools//)
+
+SCRIPTS_PATH=${SCRIPT_ROOT}/plugins/xenserver/networking/etc/xensource/scripts
+PYTHONPATH=${SCRIPTS_PATH} ${PEP8} ./plugins/xenserver/networking
+
+# NOTE(sirp): Also check Dom0 plugins w/o .py extension
+PLUGINS_PATH=${SCRIPT_ROOT}/plugins/xenserver/xenapi/etc/xapi.d/plugins
+PYTHONPATH=${PLUGINS_PATH} ${PEP8} ./plugins/xenserver/xenapi \
+ `find plugins/xenserver/xenapi/etc/xapi.d/plugins/ -type f -perm +111`
+
+! pyflakes nova/ | grep "imported but unused"
diff --git a/tools/unused_imports.sh b/tools/unused_imports.sh
deleted file mode 100755
index 0e0294517..000000000
--- a/tools/unused_imports.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-#snakefood sfood-checker detects even more unused imports
-! pyflakes nova/ | grep "imported but unused"
diff --git a/tox.ini b/tox.ini
index f54865601..a34315a7f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,13 +18,7 @@ downloadcache = ~/cache/pip
deps=
pep8==1.3.3
pyflakes
-commands =
- python tools/hacking.py --doctest
- python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
- --exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build .
- python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
- --filename=nova* bin
- bash tools/unused_imports.sh
+commands = bash tools/run_pep8.sh
[testenv:pylint]
setenv = VIRTUAL_ENV={envdir}