-rw-r--r--  .coveragerc | 2
-rw-r--r--  HACKING.rst | 19
-rwxr-xr-x  bin/nova-baremetal-manage | 4
-rwxr-xr-x  bin/nova-compute | 23
-rwxr-xr-x  bin/nova-manage | 16
-rwxr-xr-x  bin/nova-novncproxy | 4
-rwxr-xr-x  bin/nova-spicehtml5proxy | 4
-rw-r--r--  doc/api_samples/NMN/multinic-add-fixed-ip-req.json | 5
-rw-r--r--  doc/api_samples/NMN/multinic-add-fixed-ip-req.xml | 3
-rw-r--r--  doc/api_samples/NMN/multinic-remove-fixed-ip-req.json | 5
-rw-r--r--  doc/api_samples/NMN/multinic-remove-fixed-ip-req.xml | 3
-rw-r--r--  doc/api_samples/NMN/server-post-req.json | 16
-rw-r--r--  doc/api_samples/NMN/server-post-req.xml | 19
-rw-r--r--  doc/api_samples/NMN/server-post-resp.json | 16
-rw-r--r--  doc/api_samples/NMN/server-post-resp.xml | 6
-rw-r--r--  doc/api_samples/OS-EXT-AZ/server-get-resp.json | 56
-rw-r--r--  doc/api_samples/OS-EXT-AZ/server-get-resp.xml | 19
-rw-r--r--  doc/api_samples/OS-EXT-AZ/server-post-req.json | 16
-rw-r--r--  doc/api_samples/OS-EXT-AZ/server-post-req.xml | 6
-rw-r--r--  doc/api_samples/OS-EXT-AZ/server-post-resp.json | 16
-rw-r--r--  doc/api_samples/OS-EXT-AZ/server-post-resp.xml | 6
-rw-r--r--  doc/api_samples/OS-EXT-AZ/servers-detail-resp.json | 58
-rw-r--r--  doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml | 21
-rw-r--r--  doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json (renamed from doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json) | 0
-rw-r--r--  doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml (renamed from doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml) | 0
-rw-r--r--  doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json (renamed from doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json) | 0
-rw-r--r--  doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml (renamed from doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml) | 0
-rw-r--r--  doc/api_samples/OS-EXT-STS/server-get-resp.json | 57
-rw-r--r--  doc/api_samples/OS-EXT-STS/server-get-resp.xml | 19
-rw-r--r--  doc/api_samples/OS-EXT-STS/servers-list-resp.json | 18
-rw-r--r--  doc/api_samples/OS-EXT-STS/servers-list-resp.xml | 7
-rw-r--r--  doc/api_samples/all_extensions/extensions-get-resp.json | 16
-rw-r--r--  doc/api_samples/all_extensions/extensions-get-resp.xml | 6
-rw-r--r--  doc/api_samples/all_extensions/server-get-resp.json | 16
-rw-r--r--  doc/api_samples/all_extensions/server-get-resp.xml | 6
-rw-r--r--  doc/api_samples/all_extensions/servers-details-resp.json | 16
-rw-r--r--  doc/api_samples/all_extensions/servers-details-resp.xml | 8
-rw-r--r--  doc/api_samples/os-evacuate/server-evacuate-req.json | 7
-rw-r--r--  doc/api_samples/os-evacuate/server-evacuate-req.xml | 5
-rw-r--r--  doc/api_samples/os-evacuate/server-evacuate-resp.json | 3
-rw-r--r--  doc/api_samples/os-evacuate/server-evacuate-resp.xml | 2
-rw-r--r--  doc/api_samples/os-evacuate/server-post-req.json | 16
-rw-r--r--  doc/api_samples/os-evacuate/server-post-req.xml | 19
-rw-r--r--  doc/api_samples/os-evacuate/server-post-resp.json | 16
-rw-r--r--  doc/api_samples/os-evacuate/server-post-resp.xml | 6
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json | 6
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml | 5
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json | 6
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml | 5
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json | 3
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml | 2
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json | 6
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml | 5
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json | 3
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml | 2
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json | 3
-rw-r--r--  doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml | 2
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json | 7
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml | 5
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json | 9
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml | 2
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json | 8
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml | 6
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json | 8
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml | 2
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json | 9
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml | 2
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json | 11
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml | 4
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json | 10
-rw-r--r--  doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml | 4
-rw-r--r--  doc/api_samples/os-fping/fping-get-details-resp.json | 7
-rw-r--r--  doc/api_samples/os-fping/fping-get-details-resp.xml | 6
-rw-r--r--  doc/api_samples/os-fping/fping-get-resp.json | 9
-rw-r--r--  doc/api_samples/os-fping/fping-get-resp.xml | 8
-rw-r--r--  doc/api_samples/os-fping/server-post-req.json | 16
-rw-r--r--  doc/api_samples/os-fping/server-post-req.xml | 19
-rw-r--r--  doc/api_samples/os-fping/server-post-resp.json | 16
-rw-r--r--  doc/api_samples/os-fping/server-post-resp.xml | 6
-rw-r--r--  doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json | 17
-rw-r--r--  doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml | 16
-rw-r--r--  doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json | 17
-rw-r--r--  doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml | 16
-rw-r--r--  doc/api_samples/os-networks/network-add-req.json | 1
-rw-r--r--  doc/api_samples/os-networks/network-add-req.xml | 1
-rw-r--r--  doc/api_samples/os-networks/network-create-req.json | 6
-rw-r--r--  doc/api_samples/os-networks/network-create-req.xml | 4
-rw-r--r--  doc/api_samples/os-networks/network-create-resp.json | 32
-rw-r--r--  doc/api_samples/os-networks/network-create-resp.xml | 31
-rw-r--r--  doc/api_samples/os-networks/network-show-resp.json | 32
-rw-r--r--  doc/api_samples/os-networks/network-show-resp.xml | 31
-rw-r--r--  doc/api_samples/os-networks/networks-disassociate-req.json | 1
-rw-r--r--  doc/api_samples/os-networks/networks-disassociate-req.xml | 1
-rw-r--r--  doc/api_samples/os-networks/networks-list-resp.json | 64
-rw-r--r--  doc/api_samples/os-networks/networks-list-resp.xml | 63
-rw-r--r--  doc/source/conf.py | 7
-rw-r--r--  doc/source/devref/filter_scheduler.rst | 7
-rw-r--r--  etc/nova/nova.conf.sample | 292
-rw-r--r--  etc/nova/policy.json | 6
-rw-r--r--  etc/nova/rootwrap.d/compute.filters | 6
-rw-r--r--  nova/api/ec2/cloud.py | 16
-rw-r--r--  nova/api/metadata/handler.py | 2
-rw-r--r--  nova/api/metadata/password.py | 18
-rw-r--r--  nova/api/openstack/__init__.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/aggregates.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/availability_zone.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/console_output.py | 4
-rw-r--r--  nova/api/openstack/compute/contrib/evacuate.py | 98
-rw-r--r--  nova/api/openstack/compute/contrib/extended_availability_zone.py | 106
-rw-r--r--  nova/api/openstack/compute/contrib/flavor_access.py | 16
-rw-r--r--  nova/api/openstack/compute/contrib/flavorextraspecs.py | 26
-rw-r--r--  nova/api/openstack/compute/contrib/floating_ip_dns.py | 53
-rw-r--r--  nova/api/openstack/compute/contrib/floating_ips.py | 41
-rw-r--r--  nova/api/openstack/compute/contrib/floating_ips_bulk.py | 6
-rw-r--r--  nova/api/openstack/compute/contrib/hosts.py | 5
-rw-r--r--  nova/api/openstack/compute/contrib/hypervisors.py | 39
-rw-r--r--  nova/api/openstack/compute/contrib/instance_usage_audit_log.py | 13
-rw-r--r--  nova/api/openstack/compute/contrib/server_password.py | 5
-rw-r--r--  nova/api/openstack/compute/contrib/server_start_stop.py | 10
-rw-r--r--  nova/api/openstack/compute/contrib/simple_tenant_usage.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/volumes.py | 8
-rw-r--r--  nova/api/openstack/compute/image_metadata.py | 4
-rw-r--r--  nova/api/openstack/compute/servers.py | 15
-rw-r--r--  nova/api/openstack/extensions.py | 9
-rw-r--r--  nova/api/openstack/wsgi.py | 2
-rw-r--r--  nova/availability_zones.py | 3
-rw-r--r--  nova/block_device.py | 22
-rw-r--r--  nova/cells/manager.py | 104
-rw-r--r--  nova/cells/messaging.py | 147
-rw-r--r--  nova/cells/rpcapi.py | 63
-rw-r--r--  nova/cells/utils.py | 51
-rw-r--r--  nova/common/memorycache.py | 2
-rw-r--r--  nova/compute/api.py | 213
-rw-r--r--  nova/compute/cells_api.py | 130
-rw-r--r--  nova/compute/instance_actions.py | 44
-rw-r--r--  nova/compute/instance_types.py | 61
-rwxr-xr-x [-rw-r--r--]  nova/compute/manager.py | 455
-rw-r--r--  nova/compute/resource_tracker.py | 43
-rw-r--r--  nova/compute/utils.py | 133
-rw-r--r--  nova/compute/vm_mode.py | 2
-rw-r--r--  nova/conductor/api.py | 140
-rw-r--r--  nova/conductor/manager.py | 73
-rw-r--r--  nova/conductor/rpcapi.py | 101
-rw-r--r--  nova/config.py | 6
-rw-r--r--  nova/context.py | 13
-rw-r--r--  nova/crypto.py | 80
-rw-r--r--  nova/db/api.py | 42
-rw-r--r--  nova/db/sqlalchemy/api.py | 141
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py | 6
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py | 3
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py | 49
-rw-r--r--  nova/db/sqlalchemy/migration.py | 4
-rw-r--r--  nova/db/sqlalchemy/models.py | 98
-rw-r--r--  nova/db/sqlalchemy/utils.py | 117
-rw-r--r--  nova/exception.py | 38
-rw-r--r--  nova/image/glance.py | 13
-rw-r--r--  nova/locale/en_US/LC_MESSAGES/nova.po | 288
-rw-r--r--  nova/locale/nova.pot | 4581
-rw-r--r--  nova/locale/tr_TR/LC_MESSAGES/nova.po | 9859
-rw-r--r--  nova/manager.py | 10
-rw-r--r--  nova/network/api.py | 127
-rw-r--r--  nova/network/api_deprecated.py | 465
-rw-r--r--  nova/network/floating_ips.py | 691
-rw-r--r--  nova/network/l3.py | 31
-rw-r--r--  nova/network/ldapdns.py | 3
-rw-r--r--  nova/network/linux_net.py | 101
-rw-r--r--  nova/network/manager.py | 722
-rw-r--r--  nova/network/model.py | 25
-rw-r--r--  nova/network/quantumv2/__init__.py | 5
-rw-r--r--  nova/network/quantumv2/api.py | 47
-rw-r--r--  nova/network/rpcapi.py | 21
-rw-r--r--  nova/notifications.py | 1
-rw-r--r--  nova/openstack/common/db/__init__.py (renamed from nova/virt/hyperv/ioutils.py) | 12
-rw-r--r--  nova/openstack/common/db/sqlalchemy/__init__.py | 16
-rw-r--r--  nova/openstack/common/db/sqlalchemy/models.py | 103
-rw-r--r--  nova/openstack/common/db/sqlalchemy/session.py (renamed from nova/db/sqlalchemy/session.py) | 119
-rw-r--r--  nova/openstack/common/db/sqlalchemy/utils.py (renamed from nova/common/sqlalchemyutils.py) | 8
-rw-r--r--  nova/openstack/common/importutils.py | 10
-rw-r--r--  nova/openstack/common/jsonutils.py | 36
-rw-r--r--  nova/openstack/common/local.py | 11
-rw-r--r--  nova/openstack/common/lockutils.py | 110
-rw-r--r--  nova/openstack/common/rpc/__init__.py | 60
-rw-r--r--  nova/openstack/common/rpc/amqp.py | 2
-rw-r--r--  nova/openstack/common/rpc/common.py | 2
-rw-r--r--  nova/openstack/common/rpc/impl_fake.py | 2
-rw-r--r--  nova/openstack/common/rpc/impl_kombu.py | 25
-rw-r--r--  nova/openstack/common/rpc/impl_qpid.py | 39
-rw-r--r--  nova/openstack/common/rpc/impl_zmq.py | 237
-rw-r--r--  nova/openstack/common/setup.py | 213
-rw-r--r--  nova/openstack/common/version.py | 86
-rw-r--r--  nova/policy.py | 7
-rw-r--r--  nova/scheduler/driver.py | 26
-rw-r--r--  nova/scheduler/filter_scheduler.py | 53
-rw-r--r--  nova/scheduler/filters/affinity_filter.py | 42
-rw-r--r--  nova/scheduler/filters/availability_zone_filter.py | 1
-rw-r--r--  nova/scheduler/filters/json_filter.py | 2
-rw-r--r--  nova/scheduler/filters/trusted_filter.py | 2
-rw-r--r--  nova/scheduler/host_manager.py | 7
-rw-r--r--  nova/scheduler/manager.py | 79
-rw-r--r--  nova/service.py | 3
-rw-r--r--  nova/servicegroup/api.py | 3
-rw-r--r--  nova/servicegroup/drivers/db.py | 1
-rw-r--r--  nova/servicegroup/drivers/zk.py | 157
-rw-r--r--  nova/test.py | 7
-rw-r--r--  nova/tests/api/ec2/test_cloud.py | 26
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_console_output.py | 16
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_evacuate.py | 156
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py | 118
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavor_access.py | 30
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py | 65
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py | 104
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_floating_ips.py | 40
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_hypervisors.py | 4
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py | 16
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_quota_classes.py | 1
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_quotas.py | 1
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_security_groups.py | 3
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py | 1
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_server_password.py | 5
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_server_start_stop.py | 21
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py | 7
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_snapshots.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py | 1
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_volumes.py | 4
-rw-r--r--  nova/tests/api/openstack/compute/test_extensions.py | 2
-rw-r--r--  nova/tests/api/openstack/compute/test_limits.py | 3
-rw-r--r--  nova/tests/api/openstack/compute/test_servers.py | 6
-rw-r--r--  nova/tests/api/openstack/compute/test_versions.py | 1
-rw-r--r--  nova/tests/api/openstack/fakes.py | 8
-rw-r--r--  nova/tests/api/openstack/test_common.py | 8
-rw-r--r--  nova/tests/baremetal/db/test_bm_interface.py | 3
-rw-r--r--  nova/tests/baremetal/db/test_bm_pxe_ip.py | 5
-rw-r--r--  nova/tests/baremetal/test_nova_baremetal_manage.py | 4
-rw-r--r--  nova/tests/baremetal/test_pxe.py | 26
-rw-r--r--  nova/tests/baremetal/test_utils.py | 3
-rw-r--r--  nova/tests/baremetal/test_virtual_power_driver.py | 358
-rw-r--r--  nova/tests/cells/test_cells_manager.py | 215
-rw-r--r--  nova/tests/cells/test_cells_messaging.py | 290
-rw-r--r--  nova/tests/cells/test_cells_rpcapi.py | 80
-rw-r--r--  nova/tests/compute/test_compute.py | 837
-rw-r--r--  nova/tests/compute/test_compute_cells.py | 7
-rw-r--r--  nova/tests/compute/test_compute_utils.py | 64
-rw-r--r--  nova/tests/compute/test_host_api.py | 215
-rw-r--r--  nova/tests/compute/test_resource_tracker.py | 26
-rw-r--r--  nova/tests/conductor/test_conductor.py | 165
-rw-r--r--  nova/tests/conf_fixture.py | 4
-rw-r--r--  nova/tests/fake_instance_actions.py | 30
-rw-r--r--  nova/tests/fake_libvirt_utils.py | 4
-rw-r--r--  nova/tests/fake_network.py | 5
-rw-r--r--  nova/tests/fake_policy.py | 12
-rw-r--r--  nova/tests/fake_volume.py | 2
-rw-r--r--  nova/tests/fakeguestfs.py | 14
-rw-r--r--  nova/tests/fakelibvirt.py | 8
-rw-r--r--  nova/tests/hyperv/README.rst | 83
-rw-r--r--  nova/tests/hyperv/__init__.py | 16
-rw-r--r--  nova/tests/hyperv/basetestcase.py | 105
-rw-r--r--  nova/tests/hyperv/db_fakes.py | 86
-rw-r--r--  nova/tests/hyperv/fake.py | 46
-rw-r--r--  nova/tests/hyperv/hypervutils.py | 262
-rw-r--r--  nova/tests/hyperv/mockproxy.py | 272
-rw-r--r--  nova/tests/hyperv/stubs/README.rst | 2
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz | bin 670 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz | bin 2768 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz | bin 257 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz | bin 660 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz | bin 702 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz | bin 571 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz | bin 277 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz | bin 652 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz | bin 23220 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz | bin 28631 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz | bin 385 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz | bin 260 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz | bin 578 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz | bin 20274 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz | bin 725 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz | bin 426 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz | bin 257 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz | bin 660 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz | bin 31833 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz | bin 726 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz | bin 250 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz | bin 621 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz | bin 744 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz | bin 267 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz | bin 640 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz | bin 25238 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz | bin 29404 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz | bin 278 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz | bin 603 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz | bin 724 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz | bin 300 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz | bin 2806 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz | bin 441 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz | bin 756 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz | bin 308 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz | bin 634 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz | bin 753 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz | bin 331 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz | bin 605 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz | bin 458 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz | bin 743 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz | bin 21875 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz | bin 29013 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz | bin 280 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz | bin 607 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz | bin 303 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz | bin 2810 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz | bin 443 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz | bin 673 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz | bin 19822 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz | bin 272 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz | bin 598 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz | bin 750 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz | bin 294 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz | bin 416 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz | bin 710 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz | bin 23205 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz | bin 277 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz | bin 606 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz | bin 720 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz | bin 300 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz | bin 2806 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz | bin 441 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz | bin 756 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz | bin 29674 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_ctypes.p.gz | bin 929 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz | bin 266 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz | bin 423 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz | bin 309 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz | bin 1465 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz | bin 415 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz | bin 301 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz | bin 1075 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz | bin 273 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz | bin 600 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz | bin 718 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz | bin 295 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz | bin 417 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz | bin 705 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz | bin 22566 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_shutil.p.gz | bin 277 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_wmi.p.gz | bin 7893 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz | bin 301 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz | bin 1071 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz | bin 279 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz | bin 605 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz | bin 601 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz | bin 301 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz | bin 424 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz | bin 716 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz | bin 299 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz | bin 625 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz | bin 621 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz | bin 321 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz | bin 441 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz | bin 732 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz | bin 23107 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz | bin 25350 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz | bin 285 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz | bin 612 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz | bin 731 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz | bin 307 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz | bin 429 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz | bin 720 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz | bin 22768 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz | bin 269 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz | bin 597 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz | bin 715 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz | bin 292 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz | bin 414 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz | bin 708 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz | bin 22557 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz | bin 294 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz | bin 620 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz | bin 738 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz | bin 316 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz | bin 437 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz | bin 730 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz | bin 22712 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz | bin 273 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz | bin 599 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz | bin 718 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz | bin 296 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz | bin 418 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz | bin 710 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz | bin 22466 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz | bin 287 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz | bin 617 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz | bin 735 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz | bin 311 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz | bin 434 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz | bin 726 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz | bin 22540 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz | bin 273 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz | bin 600 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz | bin 717 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz | bin 295 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz | bin 418 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz | bin 707 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz | bin 22780 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz | bin 343 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz | bin 536 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz | bin 315 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz | bin 273 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz | bin 386 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz | bin 1642 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz | bin 345 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz | bin 318 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz | bin 388 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz | bin 1073 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz | bin 270 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz | bin 597 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz | bin 716 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz | bin 293 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz | bin 416 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz | bin 709 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz | bin 22819 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz | bin 287 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz | bin 613 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz | bin 732 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz | bin 309 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz | bin 432 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz | bin 723 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz | bin 22530 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz | bin 270 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz | bin 594 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz | bin 715 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz | bin 293 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz | bin 418 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz | bin 707 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz | bin 23017 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz | bin 272 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz | bin 600 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz | bin 1012 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz | bin 433 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz | bin 419 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz | bin 750 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz | bin 292 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz | bin 619 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz | bin 1034 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz | bin 458 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz | bin 439 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz | bin 773 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz | bin 23801 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz | bin 23695 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz | bin 289 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz | bin 828 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz | bin 890 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz | bin 311 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz | bin 432 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz | bin 811 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz | bin 30294 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz | bin 493 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz | bin 822 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz | bin 913 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz | bin 305 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz | bin 427 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz | bin 804 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz | bin 29760 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz | bin 280 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz | bin 607 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz | bin 725 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz | bin 302 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz | bin 424 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz | bin 716 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz | bin 23822 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz | bin 285 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz | bin 616 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz | bin 731 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz | bin 308 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz | bin 430 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz | bin 721 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz | bin 23138 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz | bin 283 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz | bin 608 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz | bin 605 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz | bin 305 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz | bin 426 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz | bin 717 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz | bin 23617 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz | bin 291 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz | bin 618 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz | bin 734 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz | bin 313 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz | bin 430 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz | bin 725 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz | bin 21340 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz | bin 291 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz | bin 616 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz | bin 734 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz | bin 312 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz | bin 433 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz | bin 729 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz | bin 22722 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz | bin 271 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz | bin 598 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz | bin 717 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz | bin 294 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz | bin 418 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz | bin 710 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz | bin 22741 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz | bin 288 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz | bin 614 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz | bin 732 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz | bin 310 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz | bin 432 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz | bin 724 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz | bin 22524 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz | bin 272 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz | bin 599 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz | bin 716 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz | bin 294 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz | bin 416 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz | bin 707 -> 0 bytes
-rw-r--r--  nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz | bin 22903 -> 0 bytes
-rw-r--r--  nova/tests/image/test_glance.py | 1
-rw-r--r--  nova/tests/integrated/api/client.py | 2
-rw-r--r--  nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl | 5
-rw-r--r--  nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl | 3
-rw-r--r--  nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl | 5
-rw-r--r--  nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl | 3
-rw-r--r--  nova/tests/integrated/api_samples/NMN/server-post-req.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/NMN/server-post-req.xml.tpl | 19
-rw-r--r--  nova/tests/integrated/api_samples/NMN/server-post-resp.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/NMN/server-post-resp.xml.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl | 56
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl | 19
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl | 19
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl | 58
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl | 20
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl) | 0
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl) | 0
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl) | 0
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl) | 0
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl | 57
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl | 19
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl | 18
-rw-r--r--  nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl | 7
-rw-r--r--  nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl | 4
-rw-r--r--  nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl | 7
-rw-r--r--  nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl | 5
-rw-r--r--  nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl | 3
-rw-r--r--  nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl | 1
-rw-r--r--  nova/tests/integrated/api_samples/os-evacuate/server-post-req.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/os-evacuate/server-post-req.xml.tpl | 19
-rw-r--r--  nova/tests/integrated/api_samples/os-evacuate/server-post-resp.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl | 5
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl | 5
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl | 3
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl | 5
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl | 3
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl | 3
-rw-r--r--  nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl | 7
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl | 5
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl | 9
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl | 8
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl | 8
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl | 9
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl | 2
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl | 11
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl | 4
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl | 10
-rw-r--r--  nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl | 4
-rw-r--r--  nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl | 7
-rw-r--r--  nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl | 9
-rw-r--r--  nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl | 8
-rw-r--r--  nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl | 19
-rw-r--r--  nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl | 17
-rw-r--r--  nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl | 17
-rw-r--r--  nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl | 16
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/network-add-req.json.tpl | 1
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/network-add-req.xml.tpl | 1
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/network-create-req.json.tpl | 6
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/network-create-req.xml.tpl | 4
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/network-create-resp.json.tpl | 32
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/network-create-resp.xml.tpl | 30
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/network-show-resp.json.tpl | 33
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/network-show-resp.xml.tpl | 31
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl | 1
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl | 1
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/networks-list-resp.json.tpl | 64
-rw-r--r--  nova/tests/integrated/api_samples/os-networks/networks-list-resp.xml.tpl | 63
-rw-r--r--  nova/tests/integrated/integrated_helpers.py | 2
-rw-r--r--  nova/tests/integrated/test_api_samples.py | 369
-rw-r--r--  nova/tests/integrated/test_multiprocess_api.py | 2
-rw-r--r--  nova/tests/integrated/test_servers.py | 41
-rw-r--r--  nova/tests/network/test_api.py | 9
-rw-r--r--  nova/tests/network/test_linux_net.py | 74
-rw-r--r--  nova/tests/network/test_manager.py | 134
-rw-r--r--  nova/tests/network/test_quantumv2.py | 9
-rw-r--r--  nova/tests/network/test_rpcapi.py | 28
-rw-r--r--  nova/tests/scheduler/test_chance_scheduler.py | 4
-rw-r--r--  nova/tests/scheduler/test_filter_scheduler.py | 221
-rw-r--r--  nova/tests/scheduler/test_host_filters.py | 42
-rw-r--r--  nova/tests/scheduler/test_host_manager.py | 13
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 90
-rw-r--r--  nova/tests/servicegroup/test_zk_driver.py | 65
-rw-r--r--  nova/tests/test_api.py | 7
-rw-r--r--  nova/tests/test_availability_zones.py | 1
-rw-r--r--  nova/tests/test_bdm.py | 1
-rw-r--r--  nova/tests/test_block_device.py | 35
-rw-r--r--  nova/tests/test_context.py | 19
-rw-r--r--  nova/tests/test_crypto.py | 28
-rw-r--r--  nova/tests/test_db_api.py | 94
-rw-r--r--  nova/tests/test_hypervapi.py | 905
-rw-r--r--  nova/tests/test_imagebackend.py | 9
-rw-r--r--  nova/tests/test_imagecache.py | 1
-rw-r--r--  nova/tests/test_instance_types.py | 55
-rw-r--r--  nova/tests/test_iptables_network.py | 19
-rw-r--r--  nova/tests/test_libvirt.py | 552
-rw-r--r--  nova/tests/test_libvirt_blockinfo.py | 427
-rw-r--r--  nova/tests/test_libvirt_vif.py | 344
-rw-r--r--  nova/tests/test_libvirt_volume.py | 366
-rw-r--r--  nova/tests/test_metadata.py | 57
-rw-r--r--  nova/tests/test_migration_utils.py | 126
-rw-r--r--  nova/tests/test_migrations.py | 609
-rw-r--r--  nova/tests/test_notifications.py | 2
-rw-r--r--  nova/tests/test_periodic_tasks.py | 1
-rw-r--r--  nova/tests/test_powervm.py | 12
-rw-r--r--  nova/tests/test_sqlalchemy.py | 66
-rw-r--r--  nova/tests/test_utils.py | 185
-rw-r--r--  nova/tests/test_versions.py | 36
-rw-r--r--  nova/tests/test_virt_disk.py | 4
-rw-r--r--  nova/tests/test_virt_disk_vfs_localfs.py | 11
-rw-r--r--  nova/tests/test_virt_drivers.py | 22
-rw-r--r--  nova/tests/test_vmwareapi.py | 9
-rw-r--r--  nova/tests/test_xenapi.py | 85
-rw-r--r--  nova/tests/utils.py | 29
-rw-r--r--  nova/tests/virt/xenapi/imageupload/__init__.py | 0
-rw-r--r--  nova/tests/virt/xenapi/imageupload/test_glance.py | 74
-rw-r--r--  nova/tests/virt/xenapi/test_vm_utils.py | 2
-rw-r--r--  nova/tests/xenapi/test_vm_utils.py | 30
-rw-r--r--  nova/utils.py | 163
-rw-r--r--  nova/version.py | 22
-rw-r--r--  nova/virt/baremetal/db/sqlalchemy/api.py | 2
-rw-r--r--  nova/virt/baremetal/db/sqlalchemy/session.py | 6
-rwxr-xr-x [-rw-r--r--]  nova/virt/baremetal/driver.py | 7
-rw-r--r--  nova/virt/baremetal/pxe.py | 13
-rw-r--r--  nova/virt/baremetal/virtual_power_driver.py | 219
-rw-r--r--  nova/virt/baremetal/virtual_power_driver_settings.py | 61
-rw-r--r--  nova/virt/baremetal/volume_driver.py | 2
-rw-r--r--  nova/virt/disk/api.py | 26
-rw-r--r--  nova/virt/disk/mount/api.py | 5
-rwxr-xr-x [-rw-r--r--]  nova/virt/driver.py | 3
-rwxr-xr-x [-rw-r--r--]  nova/virt/fake.py | 6
-rw-r--r--  nova/virt/hyperv/__init__.py | 16
-rw-r--r--  nova/virt/hyperv/baseops.py | 69
-rw-r--r--  nova/virt/hyperv/basevolumeutils.py | 74
-rw-r--r--  nova/virt/hyperv/constants.py | 9
-rwxr-xr-x [-rw-r--r--]  nova/virt/hyperv/driver.py | 98
-rw-r--r--  nova/virt/hyperv/hostops.py | 164
-rw-r--r--  nova/virt/hyperv/hostutils.py | 74
-rw-r--r--  nova/virt/hyperv/livemigrationops.py | 114
-rw-r--r--  nova/virt/hyperv/livemigrationutils.py | 115
-rw-r--r--  nova/virt/hyperv/networkutils.py | 62
-rw-r--r--  nova/virt/hyperv/pathutils.py | 67
-rw-r--r--  nova/virt/hyperv/snapshotops.py | 160
-rw-r--r--  nova/virt/hyperv/vhdutils.py | 72
-rw-r--r--  nova/virt/hyperv/vif.py | 71
-rw-r--r--  nova/virt/hyperv/vmops.py | 512
-rw-r--r--  nova/virt/hyperv/vmutils.py | 511
-rw-r--r--  nova/virt/hyperv/volumeops.py | 277
-rw-r--r--  nova/virt/hyperv/volumeutils.py | 80
-rw-r--r--  nova/virt/hyperv/volumeutilsV2.py | 70
-rw-r--r--  nova/virt/hyperv/volumeutilsv2.py | 75
-rw-r--r--  nova/virt/libvirt/blockinfo.py | 416
-rw-r--r--  nova/virt/libvirt/config.py | 3
-rw-r--r--  nova/virt/libvirt/designer.py | 32
-rwxr-xr-x [-rw-r--r--]  nova/virt/libvirt/driver.py | 702
-rw-r--r--  nova/virt/libvirt/firewall.py | 38
-rwxr-xr-x [-rw-r--r--]  nova/virt/libvirt/imagebackend.py | 88
-rw-r--r--  nova/virt/libvirt/imagecache.py | 6
-rw-r--r--  nova/virt/libvirt/snapshots.py | 89
-rwxr-xr-x [-rw-r--r--]  nova/virt/libvirt/utils.py | 47
-rw-r--r--  nova/virt/libvirt/vif.py | 456
-rw-r--r--  nova/virt/libvirt/volume.py | 172
-rwxr-xr-x [-rw-r--r--]  nova/virt/libvirt/volume_nfs.py | 84
-rw-r--r--  nova/virt/powervm/blockdev.py | 7
-rwxr-xr-x [-rw-r--r--]  nova/virt/powervm/driver.py | 18
-rw-r--r--  nova/virt/powervm/operator.py | 56
-rw-r--r--  nova/virt/vmwareapi/__init__.py | 1
-rwxr-xr-x [-rw-r--r--]  nova/virt/vmwareapi/driver.py | 138
-rw-r--r--  nova/virt/vmwareapi/host.py | 67
-rw-r--r--  nova/virt/vmwareapi/network_util.py | 36
-rw-r--r--  nova/virt/vmwareapi/vif.py | 15
-rw-r--r--  nova/virt/vmwareapi/vm_util.py | 58
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 80
-rw-r--r--  nova/virt/vmwareapi/volume_util.py | 25
-rw-r--r--  nova/virt/vmwareapi/volumeops.py | 20
-rw-r--r--  nova/virt/xenapi/agent.py | 15
-rwxr-xr-x [-rw-r--r--]  nova/virt/xenapi/driver.py | 5
-rw-r--r--  nova/virt/xenapi/fake.py | 14
-rw-r--r--  nova/virt/xenapi/imageupload/__init__.py | 0
-rw-r--r--  nova/virt/xenapi/imageupload/glance.py | 54
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 122
-rw-r--r--  nova/virt/xenapi/vmops.py | 88
-rw-r--r--  nova/virt/xenapi/volumeops.py | 73
-rw-r--r--  nova/volume/cinder.py | 4
-rw-r--r--  openstack-common.conf | 2
-rwxr-xr-x  run_tests.sh | 96
-rw-r--r--  setup.py | 7
-rw-r--r--  smoketests/base.py | 3
-rw-r--r--  smoketests/public_network_smoketests.py | 1
-rw-r--r--  tools/flakes.py | 19
-rwxr-xr-x  tools/hacking.py | 18
-rw-r--r--tools/install_venv.py225
-rw-r--r--tools/install_venv_common.py232
-rw-r--r--tools/pip-requires3
-rwxr-xr-xtools/regression_tester.py67
-rw-r--r--tools/test-requires1
-rwxr-xr-xtools/unused_imports.sh4
-rwxr-xr-xtools/with_venv.sh9
-rw-r--r--tox.ini7
737 files changed, 30470 insertions, 9567 deletions
diff --git a/.coveragerc b/.coveragerc
index 902a94349..c89967dfe 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,7 +1,7 @@
[run]
branch = True
source = nova
-omit = nova/tests/*,DynamicallyCompiledCheetahTemplate.py
+omit = nova/tests/*,nova/openstack/*,DynamicallyCompiledCheetahTemplate.py
[report]
ignore-errors = True
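
For reference, a minimal sketch of exercising this configuration through the coverage.py API (the Coverage class name and its start/stop/report methods are assumed from current coverage.py; the era's lowercase coverage.coverage() spelling behaves the same)::

    import coverage  # assumed installed as the coverage.py package

    # Coverage reads .coveragerc from the working directory by default,
    # so the widened omit list (nova/tests/*, nova/openstack/*) applies
    # without any extra command-line flags.
    cov = coverage.Coverage()
    cov.start()
    import json  # stand-in for code under measurement
    json.dumps({"measured": True})
    cov.stop()
    cov.report()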
diff --git a/HACKING.rst b/HACKING.rst
index 35493e55b..213495832 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -28,6 +28,25 @@ General
mylist = Foo().list() # OKAY, does not shadow built-in
+- Use the "is not" operator when testing for unequal identities. Example::
+
+ if not X is Y: # BAD, intended behavior is ambiguous
+ pass
+
+ if X is not Y: # OKAY, intuitive
+ pass
+
+- Use the "not in" operator for evaluating membership in a collection. Example::
+
+ if not X in Y: # BAD, intended behavior is ambiguous
+ pass
+
+ if X not in Y: # OKAY, intuitive
+ pass
+
+ if not (X in Y or X in Z): # OKAY, still better than all those 'not's
+ pass
+
Imports
-------
diff --git a/bin/nova-baremetal-manage b/bin/nova-baremetal-manage
index 34a98caf2..6c27a7b1a 100755
--- a/bin/nova-baremetal-manage
+++ b/bin/nova-baremetal-manage
@@ -187,9 +187,7 @@ def main():
sys.exit(2)
if CONF.category.name == "version":
- print(_("%(version)s (%(vcs)s)") %
- {'version': version.version_string(),
- 'vcs': version.version_string_with_vcs()})
+ print(version.version_string_with_package())
sys.exit(0)
if CONF.category.name == "bash-completion":
diff --git a/bin/nova-compute b/bin/nova-compute
index 8826015d4..814147d66 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -31,6 +31,7 @@ else:
import os
import sys
+import traceback
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -42,6 +43,8 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
from nova import config
+import nova.db.api
+from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import service
@@ -49,11 +52,31 @@ from nova import utils
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
+CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
+LOG = logging.getLogger('nova.compute')
+
+
+def block_db_access():
+ class NoDB(object):
+ def __getattr__(self, attr):
+ return self
+
+ def __call__(self, *args, **kwargs):
+ stacktrace = "".join(traceback.format_stack())
+ LOG.error('No db access allowed in nova-compute: %s' % stacktrace)
+ raise exception.DBError('No db access allowed in nova-compute')
+
+ nova.db.api.IMPL = NoDB()
+
if __name__ == '__main__':
config.parse_args(sys.argv)
logging.setup('nova')
utils.monkey_patch()
+
+ if not CONF.conductor.use_local:
+ block_db_access()
+
server = service.Service.create(binary='nova-compute',
topic=CONF.compute_topic,
db_allowed=False)
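
The NoDB stub above is a small but reusable guard pattern: attribute access returns the stub itself, so arbitrarily deep lookups still funnel into __call__, which is the only place that raises. A standalone sketch, assuming nothing beyond the standard library (RuntimeError stands in for nova's DBError)::

    import traceback

    class NoDB(object):
        def __getattr__(self, attr):
            # Any attribute chain (e.g. api.instance_get) resolves to the
            # stub itself, deferring the failure to the actual call.
            return self

        def __call__(self, *args, **kwargs):
            stack = "".join(traceback.format_stack())
            raise RuntimeError("No db access allowed here:\n%s" % stack)

    api = NoDB()
    api.instance_get                     # attribute access alone is harmless
    try:
        api.instance_get("ctxt", "uuid")  # any call raises, with a stack
    except RuntimeError as exc:
        print("blocked: %s" % str(exc).splitlines()[0])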
diff --git a/bin/nova-manage b/bin/nova-manage
index 90d191eca..6e8aa8a9c 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -80,12 +80,14 @@ from nova.db import migration
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import cliutils
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import quota
from nova.scheduler import rpcapi as scheduler_rpcapi
+from nova import servicegroup
from nova import utils
from nova import version
@@ -98,7 +100,7 @@ CONF.import_opt('multi_host', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('vlan_start', 'nova.network.manager')
CONF.import_opt('vpn_start', 'nova.network.manager')
-CONF.import_opt('default_floating_pool', 'nova.network.manager')
+CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('public_interface', 'nova.network.linux_net')
QUOTAS = quota.QUOTAS
@@ -623,6 +625,7 @@ class ServiceCommands(object):
"""
Show a list of all running services. Filter by host & service name.
"""
+ servicegroup_api = servicegroup.API()
ctxt = context.get_admin_context()
now = timeutils.utcnow()
services = db.service_get_all(ctxt)
@@ -640,8 +643,7 @@ class ServiceCommands(object):
_('State'),
_('Updated_At'))
for svc in services:
- delta = now - (svc['updated_at'] or svc['created_at'])
- alive = abs(utils.total_seconds(delta)) <= CONF.service_down_time
+ alive = servicegroup_api.service_is_up(svc)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if svc['disabled']:
@@ -831,7 +833,7 @@ class InstanceTypeCommands(object):
except exception.InstanceTypeNotFound:
print _("Valid instance type name is required")
sys.exit(1)
- except exception.DBError, e:
+ except db_session.DBError, e:
print _("DB Error: %s") % e
sys.exit(2)
except Exception:
@@ -848,7 +850,7 @@ class InstanceTypeCommands(object):
inst_types = instance_types.get_all_types()
else:
inst_types = instance_types.get_instance_type_by_name(name)
- except exception.DBError, e:
+ except db_session.DBError, e:
_db_error(e)
if isinstance(inst_types.values()[0], dict):
for k, v in inst_types.iteritems():
@@ -879,7 +881,7 @@ class InstanceTypeCommands(object):
ext_spec)
print _("Key %(key)s set to %(value)s on instance"
" type %(name)s") % locals()
- except exception.DBError, e:
+ except db_session.DBError, e:
_db_error(e)
@args('--name', dest='name', metavar='<name>',
@@ -902,7 +904,7 @@ class InstanceTypeCommands(object):
key)
print _("Key %(key)s on instance type %(name)s unset") % locals()
- except exception.DBError, e:
+ except db_session.DBError, e:
_db_error(e)
diff --git a/bin/nova-novncproxy b/bin/nova-novncproxy
index 657f97b48..617e2411d 100755
--- a/bin/nova-novncproxy
+++ b/bin/nova-novncproxy
@@ -16,10 +16,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-'''
+"""
Websocket proxy that is compatible with OpenStack Nova
noVNC consoles. Leverages websockify.py by Joel Martin
-'''
+"""
import os
import sys
diff --git a/bin/nova-spicehtml5proxy b/bin/nova-spicehtml5proxy
index 17f8cb7c2..405092942 100755
--- a/bin/nova-spicehtml5proxy
+++ b/bin/nova-spicehtml5proxy
@@ -16,10 +16,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-'''
+"""
Websocket proxy that is compatible with OpenStack Nova
SPICE HTML5 consoles. Leverages websockify.py by Joel Martin
-'''
+"""
import os
import sys
diff --git a/doc/api_samples/NMN/multinic-add-fixed-ip-req.json b/doc/api_samples/NMN/multinic-add-fixed-ip-req.json
new file mode 100644
index 000000000..860308403
--- /dev/null
+++ b/doc/api_samples/NMN/multinic-add-fixed-ip-req.json
@@ -0,0 +1,5 @@
+{
+ "addFixedIp":{
+ "networkId": 1
+ }
+}
diff --git a/doc/api_samples/NMN/multinic-add-fixed-ip-req.xml b/doc/api_samples/NMN/multinic-add-fixed-ip-req.xml
new file mode 100644
index 000000000..4c72212a7
--- /dev/null
+++ b/doc/api_samples/NMN/multinic-add-fixed-ip-req.xml
@@ -0,0 +1,3 @@
+<addFixedIp>
+ <networkId>1</networkId>
+</addFixedIp>
diff --git a/doc/api_samples/NMN/multinic-remove-fixed-ip-req.json b/doc/api_samples/NMN/multinic-remove-fixed-ip-req.json
new file mode 100644
index 000000000..0756d039e
--- /dev/null
+++ b/doc/api_samples/NMN/multinic-remove-fixed-ip-req.json
@@ -0,0 +1,5 @@
+{
+ "removeFixedIp":{
+ "address": "10.0.0.2"
+ }
+}
diff --git a/doc/api_samples/NMN/multinic-remove-fixed-ip-req.xml b/doc/api_samples/NMN/multinic-remove-fixed-ip-req.xml
new file mode 100644
index 000000000..4e6ccdd87
--- /dev/null
+++ b/doc/api_samples/NMN/multinic-remove-fixed-ip-req.xml
@@ -0,0 +1,3 @@
+<removeFixedIp>
+ <address>10.0.0.2</address>
+</removeFixedIp>
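
Both multinic bodies above are posted to the generic server action resource. A hedged sketch using the requests library (the token and server id are placeholders; the base URL follows the samples)::

    import json
    import requests  # assumed available; any HTTP client works

    BASE = "http://openstack.example.com/v2/openstack"
    HEADERS = {"Content-Type": "application/json",
               "X-Auth-Token": "<token>"}

    def server_action(server_id, body):
        url = "%s/servers/%s/action" % (BASE, server_id)
        resp = requests.post(url, headers=HEADERS, data=json.dumps(body))
        resp.raise_for_status()

    server_action("<server-uuid>", {"addFixedIp": {"networkId": 1}})
    server_action("<server-uuid>", {"removeFixedIp": {"address": "10.0.0.2"}})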
diff --git a/doc/api_samples/NMN/server-post-req.json b/doc/api_samples/NMN/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/NMN/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/NMN/server-post-req.xml b/doc/api_samples/NMN/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/NMN/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/NMN/server-post-resp.json b/doc/api_samples/NMN/server-post-resp.json
new file mode 100644
index 000000000..09d9fb612
--- /dev/null
+++ b/doc/api_samples/NMN/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "xrDLoBeMD28B",
+ "id": "3f69b6bd-00a8-4636-96ee-650093624304",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/NMN/server-post-resp.xml b/doc/api_samples/NMN/server-post-resp.xml
new file mode 100644
index 000000000..7f84ac03d
--- /dev/null
+++ b/doc/api_samples/NMN/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" adminPass="uF9wWxBh3mWL">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-AZ/server-get-resp.json b/doc/api_samples/OS-EXT-AZ/server-get-resp.json
new file mode 100644
index 000000000..a7cf031ef
--- /dev/null
+++ b/doc/api_samples/OS-EXT-AZ/server-get-resp.json
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "OS-EXT-AZ:availability_zone": null,
+ "OS-EXT-AZ:host_availability_zone": "nova",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-01-30T13:38:47Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "d38ea49a033b0efaf80c165de63f4805c886dfb94dc0fe731227eccb",
+ "id": "fb7babfd-e1a1-4add-90e6-3558180983c7",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/fb7babfd-e1a1-4add-90e6-3558180983c7",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/fb7babfd-e1a1-4add-90e6-3558180983c7",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-01-30T13:38:49Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
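
Reading the new OS-EXT-AZ fields out of such a response is plain dict access on namespaced keys; availability_zone stays null until the instance is pinned to a zone. A small sketch over a trimmed copy of the sample::

    import json

    payload = json.loads("""
    {"server": {"OS-EXT-AZ:availability_zone": null,
                "OS-EXT-AZ:host_availability_zone": "nova",
                "name": "new-server-test"}}
    """)
    server = payload["server"]
    zone = (server.get("OS-EXT-AZ:availability_zone") or
            server.get("OS-EXT-AZ:host_availability_zone"))
    print("%s runs in zone %s" % (server["name"], zone))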
diff --git a/doc/api_samples/OS-EXT-AZ/server-get-resp.xml b/doc/api_samples/OS-EXT-AZ/server-get-resp.xml
new file mode 100644
index 000000000..7a6edf057
--- /dev/null
+++ b/doc/api_samples/OS-EXT-AZ/server-get-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-01-30T14:29:20Z" hostId="471e52951e3182954c5a93489dafc3fc38a9ef3e0b62d26dc740460c" name="new-server-test" created="2013-01-30T14:29:19Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="26ea8424-758d-483a-addc-9a5905afc9e6" OS-EXT-AZ:host_availability_zone="nova" OS-EXT-AZ:availability_zone="None">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/26ea8424-758d-483a-addc-9a5905afc9e6" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/26ea8424-758d-483a-addc-9a5905afc9e6" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-AZ/server-post-req.json b/doc/api_samples/OS-EXT-AZ/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/OS-EXT-AZ/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-AZ/server-post-req.xml b/doc/api_samples/OS-EXT-AZ/server-post-req.xml
new file mode 100644
index 000000000..68f0933c7
--- /dev/null
+++ b/doc/api_samples/OS-EXT-AZ/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+    <metadata>
+        <meta key="My Server Name">Apache1</meta>
+    </metadata>
+    <personality>
+        <file path="/etc/banner.txt">
+            ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+            dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+            IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+            c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+            QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+            ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+            dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+            c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+            b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+        </file>
+    </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-AZ/server-post-resp.json b/doc/api_samples/OS-EXT-AZ/server-post-resp.json
new file mode 100644
index 000000000..76a723f87
--- /dev/null
+++ b/doc/api_samples/OS-EXT-AZ/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "MgXXGVx9ctdF",
+ "id": "75a88708-cecb-459c-8c3d-b7c93e67edbe",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/75a88708-cecb-459c-8c3d-b7c93e67edbe",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/75a88708-cecb-459c-8c3d-b7c93e67edbe",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-AZ/server-post-resp.xml b/doc/api_samples/OS-EXT-AZ/server-post-resp.xml
new file mode 100644
index 000000000..d8b1f0837
--- /dev/null
+++ b/doc/api_samples/OS-EXT-AZ/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="727a3e47-b047-4d55-b37a-ddae4641df0a" adminPass="L34AhxSuya3R">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/727a3e47-b047-4d55-b37a-ddae4641df0a" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/727a3e47-b047-4d55-b37a-ddae4641df0a" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-AZ/servers-detail-resp.json b/doc/api_samples/OS-EXT-AZ/servers-detail-resp.json
new file mode 100644
index 000000000..5fab02ac0
--- /dev/null
+++ b/doc/api_samples/OS-EXT-AZ/servers-detail-resp.json
@@ -0,0 +1,58 @@
+{
+ "servers": [
+ {
+ "OS-EXT-AZ:availability_zone": null,
+ "OS-EXT-AZ:host_availability_zone": "nova",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-01-30T13:26:51Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "60c988a84401fa15888a32833e5848e9caa99a45778310ba7b363165",
+ "id": "3dbf5b00-dabc-41ff-b6ab-4409568fae9d",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/3dbf5b00-dabc-41ff-b6ab-4409568fae9d",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/3dbf5b00-dabc-41ff-b6ab-4409568fae9d",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-01-30T13:26:52Z",
+ "user_id": "fake"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml b/doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml
new file mode 100644
index 000000000..4f1f311a8
--- /dev/null
+++ b/doc/api_samples/OS-EXT-AZ/servers-detail-resp.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="2013-01-30T14:29:20Z" hostId="85adf7d0492dedf0a7e3dc44ef7d16186b768ca3df33c4d608e630d9" name="new-server-test" created="2013-01-30T14:29:19Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="a668c72d-2bac-4806-a297-c7c11d97e3b3" OS-EXT-AZ:host_availability_zone="nova" OS-EXT-AZ:availability_zone="None">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/a668c72d-2bac-4806-a297-c7c11d97e3b3" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/a668c72d-2bac-4806-a297-c7c11d97e3b3" rel="bookmark"/>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json b/doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json
index ad3bcab5d..ad3bcab5d 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml b/doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml
index 4b42c3586..4b42c3586 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json b/doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json
index db3de77f4..db3de77f4 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json
diff --git a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml b/doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml
index 8179a3bf9..8179a3bf9 100644
--- a/doc/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml
+++ b/doc/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml
diff --git a/doc/api_samples/OS-EXT-STS/server-get-resp.json b/doc/api_samples/OS-EXT-STS/server-get-resp.json
new file mode 100644
index 000000000..763426bb1
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/server-get-resp.json
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-02-07T19:35:09Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "570eff4776ab310707d11d181037337197086998a8b3305c90bf87c8",
+ "id": "ecb5e433-fa75-4db2-af3d-a29ae8618edc",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/ecb5e433-fa75-4db2-af3d-a29ae8618edc",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/ecb5e433-fa75-4db2-af3d-a29ae8618edc",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-02-07T19:35:10Z",
+ "user_id": "fake"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/server-get-resp.xml b/doc/api_samples/OS-EXT-STS/server-get-resp.xml
new file mode 100644
index 000000000..53363a224
--- /dev/null
+++ b/doc/api_samples/OS-EXT-STS/server-get-resp.xml
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-02-07T19:35:10Z" hostId="372afb648339fb6f22faa0b75fdd8834e2382fe02b352af8d7ee0b84" name="new-server-test" created="2013-02-07T19:35:09Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="68647408-85a7-4d9b-85e7-7f1e238983ad" OS-EXT-STS:vm_state="active" OS-EXT-STS:task_state="None" OS-EXT-STS:power_state="1">
+ <image id="70a599e0-31e7-49b7-b260-868f441e862b">
+ <atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="http://openstack.example.com/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="192.168.0.3"/>
+ </network>
+ </addresses>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/68647408-85a7-4d9b-85e7-7f1e238983ad" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/68647408-85a7-4d9b-85e7-7f1e238983ad" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/servers-list-resp.json b/doc/api_samples/OS-EXT-STS/servers-list-resp.json
deleted file mode 100644
index d0309cc1f..000000000
--- a/doc/api_samples/OS-EXT-STS/servers-list-resp.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "servers": [
- {
- "id": "a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
- "links": [
- {
- "href": "http://openstack.example.com/v2/openstack/servers/a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
- "rel": "self"
- },
- {
- "href": "http://openstack.example.com/openstack/servers/a5dd5b16-552c-441c-8a03-f19c4da9e6f5",
- "rel": "bookmark"
- }
- ],
- "name": "new-server-test"
- }
- ]
-} \ No newline at end of file
diff --git a/doc/api_samples/OS-EXT-STS/servers-list-resp.xml b/doc/api_samples/OS-EXT-STS/servers-list-resp.xml
deleted file mode 100644
index f1c3cabf4..000000000
--- a/doc/api_samples/OS-EXT-STS/servers-list-resp.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server name="new-server-test" id="7128d3b9-1993-402c-91ca-ed59a0193ffa">
- <atom:link href="http://openstack.example.com/v2/openstack/servers/7128d3b9-1993-402c-91ca-ed59a0193ffa" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/7128d3b9-1993-402c-91ca-ed59a0193ffa" rel="bookmark"/>
- </server>
-</servers> \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 604ad6763..439d1af18 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -17,6 +17,14 @@
"updated": "2011-09-27T00:00:00+00:00"
},
{
+ "alias": "OS-EXT-AZ",
+ "description": "Extended Server Attributes support.",
+ "links": [],
+ "name": "ExtendedAvailabilityZone",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2",
+ "updated": "2013-01-30T00:00:00+00:00"
+ },
+ {
"alias": "OS-EXT-SRV-ATTR",
"description": "Extended Server Attributes support.",
"links": [],
@@ -177,6 +185,14 @@
"updated": "2011-09-01T00:00:00+00:00"
},
{
+ "alias": "os-evacuate",
+ "description": "Enables server evacuation",
+ "links": [],
+ "name": "Evacuate",
+ "namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2",
+ "updated": "2012-12-05T00:00:00+00:00"
+ },
+ {
"alias": "os-fixed-ips",
"description": "Fixed IPs support.",
"links": [],
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index d7f483745..71f79dc3a 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -6,6 +6,9 @@
<extension alias="OS-DCF" updated="2011-09-27T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" name="DiskConfig">
<description>Disk Management Extension.</description>
</extension>
+ <extension alias="OS-EXT-AZ" updated="2013-01-30T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" name="ExtendedAvailabilityZone">
+    <description>Extended Availability Zone support.</description>
+ </extension>
<extension alias="OS-EXT-SRV-ATTR" updated="2011-11-03T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedServerAttributes">
<description>Extended Server Attributes support.</description>
</extension>
@@ -81,6 +84,9 @@
<extension alias="os-deferred-delete" updated="2011-09-01T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1" name="DeferredDelete">
<description>Instance deferred delete.</description>
</extension>
+ <extension alias="os-evacuate" updated="2012-12-05T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/evacuate/api/v2" name="Evacuate">
+ <description>Enables server evacuation</description>
+ </extension>
<extension alias="os-fixed-ips" updated="2012-10-18T13:25:27-06:00" namespace="http://docs.openstack.org/compute/ext/fixed_ips/api/v2" name="FixedIPs">
<description>Fixed IPs support.</description>
</extension>
diff --git a/doc/api_samples/all_extensions/server-get-resp.json b/doc/api_samples/all_extensions/server-get-resp.json
index a174bd779..56feee209 100644
--- a/doc/api_samples/all_extensions/server-get-resp.json
+++ b/doc/api_samples/all_extensions/server-get-resp.json
@@ -1,7 +1,9 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-SRV-ATTR:host": "543330fc7412414094e79c867798cefd",
+ "OS-EXT-AZ:availability_zone": null,
+ "OS-EXT-AZ:host_availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "9373c31dbfe6422d9a9997c5f42a8789",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
@@ -18,7 +20,7 @@
]
},
"config_drive": "",
- "created": "2012-11-15T19:28:30Z",
+ "created": "2013-01-30T14:03:57Z",
"flavor": {
"id": "1",
"links": [
@@ -28,8 +30,8 @@
}
]
},
- "hostId": "edc4f072b6ca46a2d95c717401aa9835a204d3e4e6b148a7faba9ab0",
- "id": "05c070bf-1c34-4d99-901c-0f97a7239b86",
+ "hostId": "fc3a98f0b240ff341eb60ce8c0ae7412970d218381c5a827cfd398ee",
+ "id": "61608b15-33ea-412f-b9eb-78c6a347ec7b",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -42,11 +44,11 @@
"key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/05c070bf-1c34-4d99-901c-0f97a7239b86",
+ "href": "http://openstack.example.com/v2/openstack/servers/61608b15-33ea-412f-b9eb-78c6a347ec7b",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/05c070bf-1c34-4d99-901c-0f97a7239b86",
+ "href": "http://openstack.example.com/openstack/servers/61608b15-33ea-412f-b9eb-78c6a347ec7b",
"rel": "bookmark"
}
],
@@ -62,7 +64,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-11-15T19:28:31Z",
+ "updated": "2013-01-30T14:04:01Z",
"user_id": "fake"
}
} \ No newline at end of file
diff --git a/doc/api_samples/all_extensions/server-get-resp.xml b/doc/api_samples/all_extensions/server-get-resp.xml
index 8751a79ca..45e873147 100644
--- a/doc/api_samples/all_extensions/server-get-resp.xml
+++ b/doc/api_samples/all_extensions/server-get-resp.xml
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2012-11-15T19:28:35Z" hostId="71b7ec711488460249e7b30d505a59e474454e58d379dbddb3655fa3" name="new-server-test" created="2012-11-15T19:28:35Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="72ecf76b-789e-4bc9-9807-e8bb4afe4f74" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="748c8668e4324a82949d0e7f7e3003e2" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-DCF:diskConfig="AUTO">
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="2013-01-30T14:35:43Z" hostId="1bd51b03d4cc4d191f24e50b7a439bc6f67154fb955c147f8d0fcd09" name="new-server-test" created="2013-01-30T14:35:42Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="072a5fff-8026-4e43-874d-3ed6068884e4" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="c86d3d0e86c94eac8e87791740ca11f1" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-EXT-AZ:host_availability_zone="nova" OS-EXT-AZ:availability_zone="None" OS-DCF:diskConfig="AUTO">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -14,8 +14,8 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/72ecf76b-789e-4bc9-9807-e8bb4afe4f74" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/72ecf76b-789e-4bc9-9807-e8bb4afe4f74" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/072a5fff-8026-4e43-874d-3ed6068884e4" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/072a5fff-8026-4e43-874d-3ed6068884e4" rel="bookmark"/>
<security_groups>
<security_group name="default"/>
</security_groups>
diff --git a/doc/api_samples/all_extensions/servers-details-resp.json b/doc/api_samples/all_extensions/servers-details-resp.json
index dd0b8ab05..ed5f4d204 100644
--- a/doc/api_samples/all_extensions/servers-details-resp.json
+++ b/doc/api_samples/all_extensions/servers-details-resp.json
@@ -2,7 +2,9 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
- "OS-EXT-SRV-ATTR:host": "f2df66e47d1f427cbd106cf9058360cc",
+ "OS-EXT-AZ:availability_zone": null,
+ "OS-EXT-AZ:host_availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:host": "94d2ccc30d73475ab987661158405463",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-STS:power_state": 1,
@@ -19,7 +21,7 @@
]
},
"config_drive": "",
- "created": "2012-11-15T19:28:29Z",
+ "created": "2013-01-30T14:03:44Z",
"flavor": {
"id": "1",
"links": [
@@ -29,8 +31,8 @@
}
]
},
- "hostId": "7eedbc35a14388e24ec12917b1eb0bd5dc969619a0e367591d55d9ef",
- "id": "7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
+ "hostId": "f74e355ecde489405dfc0a1a48f2a85a5e2564e2ac6633d2b3e1b525",
+ "id": "033cc72d-708b-473b-ae8e-41064ea3fa21",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -43,11 +45,11 @@
"key_name": null,
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
+ "href": "http://openstack.example.com/v2/openstack/servers/033cc72d-708b-473b-ae8e-41064ea3fa21",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/7d7b3dfc-3423-446f-b4b0-7fba038bf8b2",
+ "href": "http://openstack.example.com/openstack/servers/033cc72d-708b-473b-ae8e-41064ea3fa21",
"rel": "bookmark"
}
],
@@ -63,7 +65,7 @@
],
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2012-11-15T19:28:30Z",
+ "updated": "2013-01-30T14:03:50Z",
"user_id": "fake"
}
]
diff --git a/doc/api_samples/all_extensions/servers-details-resp.xml b/doc/api_samples/all_extensions/servers-details-resp.xml
index bbef848d9..47e452ba1 100644
--- a/doc/api_samples/all_extensions/servers-details-resp.xml
+++ b/doc/api_samples/all_extensions/servers-details-resp.xml
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server status="ACTIVE" updated="2012-11-15T19:28:34Z" hostId="aa999c3533a60f2569f2fb142139cdc522b43f9d4153d80bc29bc6ad" name="new-server-test" created="2012-11-15T19:28:34Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="b82eb08d8d4042a99cdd2bfcc749e057" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-DCF:diskConfig="AUTO">
+<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="2013-01-30T14:36:58Z" hostId="46d42af8fc0d50d4334ef6077b595a85291d2f5682ba8e95c37e69c3" name="new-server-test" created="2013-01-30T14:36:56Z" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="03665c18-c1f1-4eb9-83a8-da00a2c1d552" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="d868eae67451474180a6193c24cb88c5" OS-EXT-SRV-ATTR:hypervisor_hostname="fake-mini" OS-EXT-AZ:host_availability_zone="nova" OS-EXT-AZ:availability_zone="None" OS-DCF:diskConfig="AUTO">
<image id="70a599e0-31e7-49b7-b260-868f441e862b">
<atom:link href="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" rel="bookmark"/>
</image>
@@ -15,8 +15,8 @@
<ip version="4" addr="192.168.0.3"/>
</network>
</addresses>
- <atom:link href="http://openstack.example.com/v2/openstack/servers/3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" rel="self"/>
- <atom:link href="http://openstack.example.com/openstack/servers/3e89bf3c-4b6b-41c3-aafd-f0dabf5a1172" rel="bookmark"/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/03665c18-c1f1-4eb9-83a8-da00a2c1d552" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/03665c18-c1f1-4eb9-83a8-da00a2c1d552" rel="bookmark"/>
<security_groups>
<security_group name="default"/>
</security_groups>
diff --git a/doc/api_samples/os-evacuate/server-evacuate-req.json b/doc/api_samples/os-evacuate/server-evacuate-req.json
new file mode 100644
index 000000000..72a90e19a
--- /dev/null
+++ b/doc/api_samples/os-evacuate/server-evacuate-req.json
@@ -0,0 +1,7 @@
+{
+ "evacuate": {
+ "host": "TargetHost",
+ "adminPass": "MySecretPass",
+ "onSharedStorage": "True"
+ }
+}
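
Like the multinic calls, evacuate is a server action; the response body echoes the admin password for the rebuilt instance. A hedged sketch (base URL, token, ids and hosts are placeholders)::

    import json
    import requests  # assumed available

    def evacuate(base, token, server_id, target_host, admin_pass):
        body = {"evacuate": {"host": target_host,
                             "adminPass": admin_pass,
                             "onSharedStorage": "True"}}
        resp = requests.post("%s/servers/%s/action" % (base, server_id),
                             headers={"Content-Type": "application/json",
                                      "X-Auth-Token": token},
                             data=json.dumps(body))
        resp.raise_for_status()
        return resp.json().get("adminPass")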
diff --git a/doc/api_samples/os-evacuate/server-evacuate-req.xml b/doc/api_samples/os-evacuate/server-evacuate-req.xml
new file mode 100644
index 000000000..636772dca
--- /dev/null
+++ b/doc/api_samples/os-evacuate/server-evacuate-req.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<evacuate xmlns="http://docs.openstack.org/compute/api/v2"
+ host="TargetHost"
+ adminPass="MySecretPass"
+ onSharedStorage="True"/>
diff --git a/doc/api_samples/os-evacuate/server-evacuate-resp.json b/doc/api_samples/os-evacuate/server-evacuate-resp.json
new file mode 100644
index 000000000..a023c720b
--- /dev/null
+++ b/doc/api_samples/os-evacuate/server-evacuate-resp.json
@@ -0,0 +1,3 @@
+{
+ "adminPass": "MySecretPass"
+}
diff --git a/doc/api_samples/os-evacuate/server-evacuate-resp.xml b/doc/api_samples/os-evacuate/server-evacuate-resp.xml
new file mode 100644
index 000000000..582388670
--- /dev/null
+++ b/doc/api_samples/os-evacuate/server-evacuate-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<adminPass>MySecretPass</adminPass> \ No newline at end of file
diff --git a/doc/api_samples/os-evacuate/server-post-req.json b/doc/api_samples/os-evacuate/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-evacuate/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-evacuate/server-post-req.xml b/doc/api_samples/os-evacuate/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-evacuate/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-evacuate/server-post-resp.json b/doc/api_samples/os-evacuate/server-post-resp.json
new file mode 100644
index 000000000..d9114225a
--- /dev/null
+++ b/doc/api_samples/os-evacuate/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "xjDVAYHmc34s",
+ "id": "784f5005-bec9-4c22-8c42-5a7dcba88d82",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/784f5005-bec9-4c22-8c42-5a7dcba88d82",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/784f5005-bec9-4c22-8c42-5a7dcba88d82",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-evacuate/server-post-resp.xml b/doc/api_samples/os-evacuate/server-post-resp.xml
new file mode 100644
index 000000000..3a31871ba
--- /dev/null
+++ b/doc/api_samples/os-evacuate/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="70b4263f-bae1-4ac4-a1c5-e5bb193767bc" adminPass="ttv8YPD3tRPm">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/70b4263f-bae1-4ac4-a1c5-e5bb193767bc" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/70b4263f-bae1-4ac4-a1c5-e5bb193767bc" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json
new file mode 100644
index 000000000..63fc8738b
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml
new file mode 100644
index 000000000..95c1daab9
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<extra_specs>
+ <key1>value1</key1>
+ <key2>value2</key2>
+</extra_specs> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json
new file mode 100644
index 000000000..63fc8738b
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml
new file mode 100644
index 000000000..06b01a9fc
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_specs>
+ <key2>value2</key2>
+ <key1>value1</key1>
+</extra_specs> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json
new file mode 100644
index 000000000..e71755fe6
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json
@@ -0,0 +1,3 @@
+{
+ "key1": "value1"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml
new file mode 100644
index 000000000..d57579ba6
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_spec key="key1">value1</extra_spec> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json
new file mode 100644
index 000000000..63fc8738b
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "value1",
+ "key2": "value2"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml
new file mode 100644
index 000000000..06b01a9fc
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_specs>
+ <key2>value2</key2>
+ <key1>value1</key1>
+</extra_specs> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json
new file mode 100644
index 000000000..a40d79e32
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json
@@ -0,0 +1,3 @@
+{
+ "key1": "new_value1"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml
new file mode 100644
index 000000000..b7ae6732b
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <key1>new_value1</key1> \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json
new file mode 100644
index 000000000..a40d79e32
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json
@@ -0,0 +1,3 @@
+{
+ "key1": "new_value1"
+} \ No newline at end of file
diff --git a/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml
new file mode 100644
index 000000000..13208ad7c
--- /dev/null
+++ b/doc/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_spec key="key1">new_value1</extra_spec> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json
new file mode 100644
index 000000000..934ed21b2
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json
@@ -0,0 +1,7 @@
+{
+    "dns_entry" :
+    {
+        "ip": "192.168.53.11",
+        "dns_type": "A"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml
new file mode 100644
index 000000000..36fa41120
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry>
+ <ip>192.168.53.11</ip>
+ <dns_type>A</dns_type>
+</dns_entry> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json
new file mode 100644
index 000000000..70c903886
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "domain1.example.org",
+ "id": null,
+ "ip": "192.168.1.1",
+ "name": "instance1",
+ "type": "A"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml
new file mode 100644
index 000000000..25afe599a
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry ip="192.168.1.1" domain="domain1.example.org" type="A" id="None" name="instance1"/> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json
new file mode 100644
index 000000000..05aac422d
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json
@@ -0,0 +1,8 @@
+{
+    "domain_entry" :
+    {
+        "domain": "domain1.example.org",
+        "scope": "public",
+        "project": "project1"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml
new file mode 100644
index 000000000..df12b61cf
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry>
+ <domain>domain1.example.org</domain>
+ <scope>public</scope>
+ <project>project1</project>
+</domain_entry> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json
new file mode 100644
index 000000000..db43e92d3
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json
@@ -0,0 +1,8 @@
+{
+ "domain_entry": {
+ "availability_zone": null,
+ "domain": "domain1.example.org",
+ "project": "project1",
+ "scope": "public"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml
new file mode 100644
index 000000000..8bdf42c41
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry project="project1" scope="public" domain="domain1.example.org" availability_zone="None"/> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json
new file mode 100644
index 000000000..84ee3930a
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "domain1.example.org",
+ "id": null,
+ "ip": "192.168.1.1",
+ "name": "instance1",
+ "type": null
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml
new file mode 100644
index 000000000..441b977e6
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry ip="192.168.1.1" domain="domain1.example.org" type="None" id="None" name="instance1"/> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json
new file mode 100644
index 000000000..3263de43a
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json
@@ -0,0 +1,11 @@
+{
+ "dns_entries": [
+ {
+ "domain": "domain1.example.org",
+ "id": null,
+ "ip": "192.168.1.1",
+ "name": "instance1",
+ "type": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml
new file mode 100644
index 000000000..8d910e09b
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entries>
+ <dns_entry ip="192.168.1.1" domain="domain1.example.org" type="None" id="None" name="instance1"/>
+</dns_entries> \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json
new file mode 100644
index 000000000..8882c23a3
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json
@@ -0,0 +1,10 @@
+{
+ "domain_entries": [
+ {
+ "availability_zone": null,
+ "domain": "domain1.example.org",
+ "project": "project1",
+ "scope": "public"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml
new file mode 100644
index 000000000..d874ef058
--- /dev/null
+++ b/doc/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entries>
+ <domain_entry project="project1" scope="public" domain="domain1.example.org" availability_zone="None"/>
+</domain_entries> \ No newline at end of file
diff --git a/doc/api_samples/os-fping/fping-get-details-resp.json b/doc/api_samples/os-fping/fping-get-details-resp.json
new file mode 100644
index 000000000..a5692832b
--- /dev/null
+++ b/doc/api_samples/os-fping/fping-get-details-resp.json
@@ -0,0 +1,7 @@
+{
+ "server": {
+ "alive": false,
+ "id": "f5e6fd6d-c0a3-4f9e-aabf-d69196b6d11a",
+ "project_id": "openstack"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-fping/fping-get-details-resp.xml b/doc/api_samples/os-fping/fping-get-details-resp.xml
new file mode 100644
index 000000000..5b3cb4785
--- /dev/null
+++ b/doc/api_samples/os-fping/fping-get-details-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server>
+ <project_id>openstack</project_id>
+ <id>69d3caab-ed51-4ee7-9d4b-941ee1b45484</id>
+ <alive>False</alive>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-fping/fping-get-resp.json b/doc/api_samples/os-fping/fping-get-resp.json
new file mode 100644
index 000000000..11bf37edd
--- /dev/null
+++ b/doc/api_samples/os-fping/fping-get-resp.json
@@ -0,0 +1,9 @@
+{
+ "servers": [
+ {
+ "alive": false,
+ "id": "1d1aea35-472b-40cf-9337-8eb68480aaa1",
+ "project_id": "openstack"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-fping/fping-get-resp.xml b/doc/api_samples/os-fping/fping-get-resp.xml
new file mode 100644
index 000000000..dbf03778b
--- /dev/null
+++ b/doc/api_samples/os-fping/fping-get-resp.xml
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers>
+ <server>
+ <project_id>openstack</project_id>
+ <id>6a576ebe-8777-473a-ab95-8df34a50dedd</id>
+ <alive>False</alive>
+ </server>
+</servers> \ No newline at end of file
diff --git a/doc/api_samples/os-fping/server-post-req.json b/doc/api_samples/os-fping/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-fping/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-fping/server-post-req.xml b/doc/api_samples/os-fping/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-fping/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-fping/server-post-resp.json b/doc/api_samples/os-fping/server-post-resp.json
new file mode 100644
index 000000000..09d9fb612
--- /dev/null
+++ b/doc/api_samples/os-fping/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "xrDLoBeMD28B",
+ "id": "3f69b6bd-00a8-4636-96ee-650093624304",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-fping/server-post-resp.xml b/doc/api_samples/os-fping/server-post-resp.xml
new file mode 100644
index 000000000..7f84ac03d
--- /dev/null
+++ b/doc/api_samples/os-fping/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" adminPass="uF9wWxBh3mWL">
+ <metadata/>
+ <atom:link href="http://openstack.example.com/v2/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="self"/>
+ <atom:link href="http://openstack.example.com/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="bookmark"/>
+</server> \ No newline at end of file
diff --git a/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json
new file mode 100644
index 000000000..1d308d4ae
--- /dev/null
+++ b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json
@@ -0,0 +1,17 @@
+{
+ "instance_usage_audit_logs": {
+ "hosts_not_run": [
+ "f4eb7cfd155f4574967f8b55a7faed75"
+ ],
+ "log": {},
+ "num_hosts": 1,
+ "num_hosts_done": 0,
+ "num_hosts_not_run": 1,
+ "num_hosts_running": 0,
+ "overall_status": "0 of 1 hosts done. 0 errors.",
+ "period_beginning": "2012-12-01 00:00:00",
+ "period_ending": "2013-01-01 00:00:00",
+ "total_errors": 0,
+ "total_instances": 0
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml
new file mode 100644
index 000000000..82d157fb9
--- /dev/null
+++ b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instance_usage_audit_logs>
+ <total_errors>0</total_errors>
+ <total_instances>0</total_instances>
+ <log/>
+ <num_hosts_running>0</num_hosts_running>
+ <num_hosts_done>0</num_hosts_done>
+ <num_hosts_not_run>1</num_hosts_not_run>
+ <hosts_not_run>
+ <item>107debd115684f098d4c73ffac7ec515</item>
+ </hosts_not_run>
+ <overall_status>0 of 1 hosts done. 0 errors.</overall_status>
+ <period_ending>2013-01-01 00:00:00</period_ending>
+ <period_beginning>2012-12-01 00:00:00</period_beginning>
+ <num_hosts>1</num_hosts>
+</instance_usage_audit_logs> \ No newline at end of file
diff --git a/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json
new file mode 100644
index 000000000..2b5fe54c1
--- /dev/null
+++ b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json
@@ -0,0 +1,17 @@
+{
+ "instance_usage_audit_log": {
+ "hosts_not_run": [
+ "8e33da2b48684ef3ab165444d6a7384c"
+ ],
+ "log": {},
+ "num_hosts": 1,
+ "num_hosts_done": 0,
+ "num_hosts_not_run": 1,
+ "num_hosts_running": 0,
+ "overall_status": "0 of 1 hosts done. 0 errors.",
+ "period_beginning": "2012-06-01 00:00:00",
+ "period_ending": "2012-07-01 00:00:00",
+ "total_errors": 0,
+ "total_instances": 0
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml
new file mode 100644
index 000000000..453689737
--- /dev/null
+++ b/doc/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instance_usage_audit_log>
+ <total_errors>0</total_errors>
+ <total_instances>0</total_instances>
+ <log/>
+ <num_hosts_running>0</num_hosts_running>
+ <num_hosts_done>0</num_hosts_done>
+ <num_hosts_not_run>1</num_hosts_not_run>
+ <hosts_not_run>
+ <item>4b54478b73734afcbf0e2676a3303d1a</item>
+ </hosts_not_run>
+ <overall_status>0 of 1 hosts done. 0 errors.</overall_status>
+ <period_ending>2012-07-01 00:00:00</period_ending>
+ <period_beginning>2012-06-01 00:00:00</period_beginning>
+ <num_hosts>1</num_hosts>
+</instance_usage_audit_log> \ No newline at end of file
diff --git a/doc/api_samples/os-networks/network-add-req.json b/doc/api_samples/os-networks/network-add-req.json
new file mode 100644
index 000000000..eca9c65b8
--- /dev/null
+++ b/doc/api_samples/os-networks/network-add-req.json
@@ -0,0 +1 @@
+{"id": "1"} \ No newline at end of file
diff --git a/doc/api_samples/os-networks/network-add-req.xml b/doc/api_samples/os-networks/network-add-req.xml
new file mode 100644
index 000000000..3ba873da3
--- /dev/null
+++ b/doc/api_samples/os-networks/network-add-req.xml
@@ -0,0 +1 @@
+<id>1</id> \ No newline at end of file
diff --git a/doc/api_samples/os-networks/network-create-req.json b/doc/api_samples/os-networks/network-create-req.json
new file mode 100644
index 000000000..422e77234
--- /dev/null
+++ b/doc/api_samples/os-networks/network-create-req.json
@@ -0,0 +1,6 @@
+{
+ "network": {
+ "label": "new net 111",
+ "cidr": "10.20.105.0/24"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks/network-create-req.xml b/doc/api_samples/os-networks/network-create-req.xml
new file mode 100644
index 000000000..890d7bf3a
--- /dev/null
+++ b/doc/api_samples/os-networks/network-create-req.xml
@@ -0,0 +1,4 @@
+<network>
+ <label>new net 111</label>
+ <cidr>10.20.105.0/24</cidr>
+</network> \ No newline at end of file
diff --git a/doc/api_samples/os-networks/network-create-resp.json b/doc/api_samples/os-networks/network-create-resp.json
new file mode 100644
index 000000000..194694a77
--- /dev/null
+++ b/doc/api_samples/os-networks/network-create-resp.json
@@ -0,0 +1,32 @@
+{
+ "network": {
+ "bridge": null,
+ "bridge_interface": null,
+ "broadcast": "10.20.105.255",
+ "cidr": "10.20.105.0/24",
+ "cidr_v6": null,
+ "created_at": null,
+ "deleted": null,
+ "deleted_at": null,
+ "dhcp_start": "10.20.105.2",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "10.20.105.1",
+ "gateway_v6": null,
+ "host": null,
+ "id": "668687f9-d724-4976-a6f4-a6fd3ad83da3",
+ "injected": null,
+ "label": "new net 111",
+ "multi_host": null,
+ "netmask": "255.255.255.0",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "updated_at": null,
+ "vlan": null,
+ "vpn_private_address": null,
+ "vpn_public_address": null,
+ "vpn_public_port": null
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks/network-create-resp.xml b/doc/api_samples/os-networks/network-create-resp.xml
new file mode 100644
index 000000000..f638228ef
--- /dev/null
+++ b/doc/api_samples/os-networks/network-create-resp.xml
@@ -0,0 +1,31 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<network>
+ <bridge>None</bridge>
+ <vpn_public_port>None</vpn_public_port>
+ <dhcp_start>10.20.105.2</dhcp_start>
+ <bridge_interface>None</bridge_interface>
+ <updated_at>None</updated_at>
+ <id>1bbbed2b-0daa-47a1-b869-1981c29150b1</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>10.20.105.1</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>new net 111</label>
+ <priority>None</priority>
+ <project_id>None</project_id>
+ <vpn_private_address>None</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>None</vlan>
+ <broadcast>10.20.105.255</broadcast>
+ <netmask>255.255.255.0</netmask>
+ <injected>None</injected>
+ <cidr>10.20.105.0/24</cidr>
+ <vpn_public_address>None</vpn_public_address>
+ <multi_host>None</multi_host>
+ <dns2>None</dns2>
+ <created_at>None</created_at>
+ <host>None</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+</network> \ No newline at end of file
diff --git a/doc/api_samples/os-networks/network-show-resp.json b/doc/api_samples/os-networks/network-show-resp.json
new file mode 100644
index 000000000..f3d7a18ce
--- /dev/null
+++ b/doc/api_samples/os-networks/network-show-resp.json
@@ -0,0 +1,32 @@
+{
+ "network": {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "10.0.0.7",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "2011-08-15 06:19:19.387525",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "10.0.0.3",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "10.0.0.1",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "20c8acc0-f747-4d71-a389-46d078ebf047",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "255.255.255.248",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "2011-08-16 09:26:13.048257",
+ "vlan": 100,
+ "vpn_private_address": "10.0.0.2",
+ "vpn_public_address": "127.0.0.1",
+ "vpn_public_port": 1000
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks/network-show-resp.xml b/doc/api_samples/os-networks/network-show-resp.xml
new file mode 100644
index 000000000..2faed7a28
--- /dev/null
+++ b/doc/api_samples/os-networks/network-show-resp.xml
@@ -0,0 +1,31 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<network>
+ <bridge>br100</bridge>
+ <vpn_public_port>1000</vpn_public_port>
+ <dhcp_start>10.0.0.3</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>2011-08-16 09:26:13.048257</updated_at>
+ <id>20c8acc0-f747-4d71-a389-46d078ebf047</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>10.0.0.1</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_0</label>
+ <priority>None</priority>
+ <project_id>1234</project_id>
+ <vpn_private_address>10.0.0.2</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>100</vlan>
+ <broadcast>10.0.0.7</broadcast>
+ <netmask>255.255.255.248</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.0/29</cidr>
+ <vpn_public_address>127.0.0.1</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>2011-08-15 06:19:19.387525</created_at>
+ <host>nsokolov-desktop</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+</network> \ No newline at end of file
diff --git a/doc/api_samples/os-networks/networks-disassociate-req.json b/doc/api_samples/os-networks/networks-disassociate-req.json
new file mode 100644
index 000000000..81e17d809
--- /dev/null
+++ b/doc/api_samples/os-networks/networks-disassociate-req.json
@@ -0,0 +1 @@
+{"disassociate": null} \ No newline at end of file
diff --git a/doc/api_samples/os-networks/networks-disassociate-req.xml b/doc/api_samples/os-networks/networks-disassociate-req.xml
new file mode 100644
index 000000000..85762fd0d
--- /dev/null
+++ b/doc/api_samples/os-networks/networks-disassociate-req.xml
@@ -0,0 +1 @@
+<disassociate>None</disassociate> \ No newline at end of file
diff --git a/doc/api_samples/os-networks/networks-list-resp.json b/doc/api_samples/os-networks/networks-list-resp.json
new file mode 100644
index 000000000..90da7c15d
--- /dev/null
+++ b/doc/api_samples/os-networks/networks-list-resp.json
@@ -0,0 +1,64 @@
+{
+ "networks": [
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "10.0.0.7",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "2011-08-15 06:19:19.387525",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "10.0.0.3",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "10.0.0.1",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "20c8acc0-f747-4d71-a389-46d078ebf047",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "255.255.255.248",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "2011-08-16 09:26:13.048257",
+ "vlan": 100,
+ "vpn_private_address": "10.0.0.2",
+ "vpn_public_address": "127.0.0.1",
+ "vpn_public_port": 1000
+ },
+ {
+ "bridge": "br101",
+ "bridge_interface": "eth0",
+ "broadcast": "10.0.0.15",
+ "cidr": "10.0.0.10/29",
+ "cidr_v6": null,
+ "created_at": "2011-08-15 06:19:19.885495",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "10.0.0.11",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "10.0.0.9",
+ "gateway_v6": null,
+ "host": null,
+ "id": "20c8acc0-f747-4d71-a389-46d078ebf000",
+ "injected": false,
+ "label": "mynet_1",
+ "multi_host": false,
+ "netmask": "255.255.255.248",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "updated_at": null,
+ "vlan": 101,
+ "vpn_private_address": "10.0.0.10",
+ "vpn_public_address": null,
+ "vpn_public_port": 1001
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/os-networks/networks-list-resp.xml b/doc/api_samples/os-networks/networks-list-resp.xml
new file mode 100644
index 000000000..59620c228
--- /dev/null
+++ b/doc/api_samples/os-networks/networks-list-resp.xml
@@ -0,0 +1,63 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<networks>
+ <network>
+ <bridge>br100</bridge>
+ <vpn_public_port>1000</vpn_public_port>
+ <dhcp_start>10.0.0.3</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>2011-08-16 09:26:13.048257</updated_at>
+ <id>20c8acc0-f747-4d71-a389-46d078ebf047</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>10.0.0.1</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_0</label>
+ <priority>None</priority>
+ <project_id>1234</project_id>
+ <vpn_private_address>10.0.0.2</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>100</vlan>
+ <broadcast>10.0.0.7</broadcast>
+ <netmask>255.255.255.248</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.0/29</cidr>
+ <vpn_public_address>127.0.0.1</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>2011-08-15 06:19:19.387525</created_at>
+ <host>nsokolov-desktop</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ </network>
+ <network>
+ <bridge>br101</bridge>
+ <vpn_public_port>1001</vpn_public_port>
+ <dhcp_start>10.0.0.11</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>None</updated_at>
+ <id>20c8acc0-f747-4d71-a389-46d078ebf000</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>10.0.0.9</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_1</label>
+ <priority>None</priority>
+ <project_id>None</project_id>
+ <vpn_private_address>10.0.0.10</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>101</vlan>
+ <broadcast>10.0.0.15</broadcast>
+ <netmask>255.255.255.248</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.10/29</cidr>
+ <vpn_public_address>None</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>2011-08-15 06:19:19.885495</created_at>
+ <host>None</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ </network>
+</networks> \ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 0bdaeb08e..9af5f5494 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -64,12 +64,11 @@ copyright = u'2010-present, OpenStack, LLC'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
-from nova import version as nova_version
-#import nova.version
+from nova.version import version_info
# The full version, including alpha/beta/rc tags.
-release = nova_version.version_string()
+release = version_info.release_string()
# The short X.Y version.
-version = nova_version.canonical_version_string()
+version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index 63ed95c82..31dcfde77 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -91,6 +91,8 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
* |TypeAffinityFilter| - Only passes hosts that are not already running an
instance of the requested type.
* |AggregateTypeAffinityFilter| - limits instance_type by aggregate.
+* |GroupAntiAffinityFilter| - ensures that each instance in the group is on
+ a different host.
Now we can focus on these standard filter classes in detail. I will pass the
simplest ones, such as |AllHostsFilter|, |CoreFilter| and |RamFilter| are,
@@ -163,6 +165,10 @@ of the set of instances uses.
the network address of the current host is in the same sub network as it was
defined in the request.
+|GroupAntiAffinityFilter| - its `host_passes` method returns `True` if the
+host under consideration is not among the hosts already used by the group.
+The group's hosts are tracked under a group name, and the scheduler hint
+carries that group name.
+
|JsonFilter| - this filter provides the opportunity to write complicated
queries for the hosts capabilities filtering, based on simple JSON-like syntax.
The following operations can be used on the host state properties:
@@ -336,6 +342,7 @@ in :mod:`nova.tests.scheduler`.
.. |JsonFilter| replace:: :class:`JsonFilter <nova.scheduler.filters.json_filter.JsonFilter>`
.. |RamFilter| replace:: :class:`RamFilter <nova.scheduler.filters.ram_filter.RamFilter>`
.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter <nova.scheduler.filters.affinity_filter.SimpleCIDRAffinityFilter>`
+.. |GroupAntiAffinityFilter| replace:: :class:`GroupAntiAffinityFilter <nova.scheduler.filters.affinity_filter.GroupAntiAffinityFilter>`
.. |DifferentHostFilter| replace:: :class:`DifferentHostFilter <nova.scheduler.filters.affinity_filter.DifferentHostFilter>`
.. |SameHostFilter| replace:: :class:`SameHostFilter <nova.scheduler.filters.affinity_filter.SameHostFilter>`
.. |RetryFilter| replace:: :class:`RetryFilter <nova.scheduler.filters.retry_filter.RetryFilter>`
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index a5f945618..61350b183 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -201,6 +201,9 @@
# a list of APIs to enable by default (list value)
#enabled_apis=ec2,osapi_compute,metadata
+# a list of APIs with enabled SSL (list value)
+#enabled_ssl_apis=
+
# IP address for EC2 API to listen (string value)
#ec2_listen=0.0.0.0
@@ -268,7 +271,7 @@
#monkey_patch=false
# List of modules/decorators to monkey patch (list value)
-#monkey_patch_modules=nova.api.ec2.cloud:nova.notifier.api.notify_decorator,nova.compute.api:nova.notifier.api.notify_decorator
+#monkey_patch_modules=nova.api.ec2.cloud:nova.openstack.common.notifier.api.notify_decorator,nova.compute.api:nova.openstack.common.notifier.api.notify_decorator
# Length of generated instance admin passwords (integer value)
#password_length=12
@@ -284,6 +287,10 @@
# commands as root (string value)
#rootwrap_config=/etc/nova/rootwrap.conf
+# Explicitly specify the temporary working directory (string
+# value)
+#tempdir=<None>
+
#
# Options defined in nova.wsgi
@@ -299,6 +306,20 @@
# body_length, wall_seconds. (string value)
#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ssl_ca_file=<None>
+
+# SSL certificate of API server (string value)
+#ssl_cert_file=<None>
+
+# SSL private key of API server (string value)
+#ssl_key_file=<None>
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Not supported on OS X. (integer value)
+#tcp_keepidle=600
+
#
# Options defined in nova.api.auth
@@ -795,57 +816,6 @@
#
-# Options defined in nova.db.sqlalchemy.session
-#
-
-# The SQLAlchemy connection string used to connect to the
-# database (string value)
-#sql_connection=sqlite:///$state_path/$sqlite_db
-
-# the filename to use with sqlite (string value)
-#sqlite_db=nova.sqlite
-
-# timeout before idle sql connections are reaped (integer
-# value)
-#sql_idle_timeout=3600
-
-# If passed, use synchronous mode for sqlite (boolean value)
-#sqlite_synchronous=true
-
-# Minimum number of SQL connections to keep open in a pool
-# (integer value)
-#sql_min_pool_size=1
-
-# Maximum number of SQL connections to keep open in a pool
-# (integer value)
-#sql_max_pool_size=5
-
-# maximum db connection retries during startup. (setting -1
-# implies an infinite retry count) (integer value)
-#sql_max_retries=10
-
-# interval between retries of opening a sql connection
-# (integer value)
-#sql_retry_interval=10
-
-# If set, use this value for max_overflow with sqlalchemy
-# (integer value)
-#sql_max_overflow=<None>
-
-# Verbosity of SQL debugging information. 0=None,
-# 100=Everything (integer value)
-#sql_connection_debug=0
-
-# Add python stack traces to SQL as comment strings (boolean
-# value)
-#sql_connection_trace=false
-
-# enable the use of eventlet's db_pool for MySQL (boolean
-# value)
-#sql_dbpool_enable=false
-
-
-#
# Options defined in nova.image.glance
#
@@ -872,6 +842,11 @@
# (integer value)
#glance_num_retries=0
+# A list of url schemes that can be downloaded directly via the
+# direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
#
# Options defined in nova.image.s3
@@ -928,6 +903,29 @@
#
+# Options defined in nova.network.floating_ips
+#
+
+# Default pool for floating ips (string value)
+#default_floating_pool=nova
+
+# Autoassigning floating ip to VM (boolean value)
+#auto_assign_floating_ip=false
+
+# full class name for the DNS Manager for floating IPs (string
+# value)
+#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# full class name for the DNS Manager for instance IPs (string
+# value)
+#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
+
+# full class name for the DNS Zone for instance IPs (string
+# value)
+#instance_dns_domain=
+
+
+#
# Options defined in nova.network.ldapdns
#
@@ -1020,6 +1018,11 @@
# default gateway from dhcp server (boolean value)
#use_single_default_gateway=false
+# An interface that bridges can forward to. If this is set to
+# all then all traffic will be forwarded. Can be specified
+# multiple times. (multi valued)
+#forward_bridge_interface=all
+
# the ip for the metadata api server (string value)
#metadata_host=$my_ip
@@ -1060,18 +1063,9 @@
# First Vpn port for private networks (integer value)
#vpn_start=1000
-# Default value for multi_host in networks (boolean value)
-#multi_host=false
-
# Number of addresses in each private subnet (integer value)
#network_size=256
-# Floating IP address block (string value)
-#floating_range=4.4.4.0/24
-
-# Default pool for floating ips (string value)
-#default_floating_pool=nova
-
# Fixed IP address block (string value)
#fixed_range=10.0.0.0/8
@@ -1095,9 +1089,6 @@
# value)
#create_unique_mac_address_attempts=5
-# Autoassigning floating ip to VM (boolean value)
-#auto_assign_floating_ip=false
-
# If passed, use fake network devices and addresses (boolean
# value)
#fake_network=false
@@ -1134,18 +1125,6 @@
# Indicates underlying L3 management library (string value)
#l3_lib=nova.network.l3.LinuxNetL3
-# full class name for the DNS Manager for instance IPs (string
-# value)
-#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
-
-# full class name for the DNS Zone for instance IPs (string
-# value)
-#instance_dns_domain=
-
-# full class name for the DNS Manager for floating IPs (string
-# value)
-#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver
-
#
# Options defined in nova.network.quantumv2.api
@@ -1170,6 +1149,10 @@
# (string value)
#quantum_admin_tenant_name=<None>
+# region name for connecting to quantum in admin context
+# (string value)
+#quantum_region_name=<None>
+
# auth url for connecting to quantum in admin context (string
# value)
#quantum_admin_auth_url=http://localhost:5000/v2.0
@@ -1190,6 +1173,11 @@
# the topic network nodes listen on (string value)
#network_topic=network
+# Default value for multi_host in networks. Also, if set, some
+# rpc network calls will be sent directly to host. (boolean
+# value)
+#multi_host=false
+
#
# Options defined in nova.objectstore.s3server
@@ -1206,6 +1194,57 @@
#
+# Options defined in nova.openstack.common.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+#sql_connection=sqlite:///$state_path/$sqlite_db
+
+# the filename to use with sqlite (string value)
+#sqlite_db=nova.sqlite
+
+# timeout before idle sql connections are reaped (integer
+# value)
+#sql_idle_timeout=3600
+
+# If passed, use synchronous mode for sqlite (boolean value)
+#sqlite_synchronous=true
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+#sql_min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+#sql_max_pool_size=5
+
+# maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+#sql_max_retries=10
+
+# interval between retries of opening a sql connection
+# (integer value)
+#sql_retry_interval=10
+
+# If set, use this value for max_overflow with sqlalchemy
+# (integer value)
+#sql_max_overflow=<None>
+
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+#sql_connection_debug=0
+
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+#sql_connection_trace=false
+
+# enable the use of eventlet's db_pool for MySQL (boolean
+# value)
+#sql_dbpool_enable=false
+
+
+#
# Options defined in nova.openstack.common.eventlet_backdoor
#
@@ -1655,14 +1694,6 @@
#
-# Options defined in nova.virt.baremetal.db.api
-#
-
-# The backend to use for db (string value)
-#baremetal_db_backend=sqlalchemy
-
-
-#
# Options defined in nova.virt.configdrive
#
@@ -1774,7 +1805,7 @@
# value)
#hyperv_attaching_volume_retry_count=10
-# The seconds to wait between a volume attachment attempt
+# The seconds to wait between a volume attachment attempt
# (integer value)
#hyperv_wait_between_attach_retry=5
@@ -1846,10 +1877,10 @@
#snapshot_image_format=<None>
# The libvirt VIF driver to configure the VIFs. (string value)
-#libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtBridgeDriver
+#libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
# Libvirt handlers for remote volumes. (list value)
-#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume_nfs.NfsVolumeDriver
+#libvirt_volume_drivers=iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver,local=nova.virt.libvirt.volume.LibvirtVolumeDriver,fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver,rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver,nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver
# Override the default disk prefix for the devices attached to
# a server, which is dependent on libvirt_type. (valid options
@@ -1938,6 +1969,15 @@
#
+# Options defined in nova.virt.libvirt.utils
+#
+
+# Compress snapshot images when possible. This currently
+# applies exclusively to qcow2 images (boolean value)
+#libvirt_snapshot_compression=false
+
+
+#
# Options defined in nova.virt.libvirt.vif
#
@@ -1966,15 +2006,14 @@
# (string value)
#rbd_secret_uuid=<None>
-
-#
-# Options defined in nova.virt.libvirt.volume_nfs
-#
-
-# Base dir where nfs expected to be mounted on compute (string
-# value)
+# Dir where the nfs volume is mounted on the compute node
+# (string value)
#nfs_mount_point_base=$state_path/mnt
+# Mount options passed to the nfs client. See the nfs man page
+# for details (string value)
+#nfs_mount_options=<None>
+
#
# Options defined in nova.virt.powervm.driver
@@ -2003,28 +2042,48 @@
# Options defined in nova.virt.vmwareapi.driver
#
-# URL for connection to VMware ESX host.Required if
-# compute_driver is vmwareapi.VMwareESXDriver. (string value)
+# URL for connection to VMware ESX/VC host. Required if
+# compute_driver is vmwareapi.VMwareESXDriver or
+# vmwareapi.VMwareVCDriver. (string value)
#vmwareapi_host_ip=<None>
-# Username for connection to VMware ESX host. Used only if
-# compute_driver is vmwareapi.VMwareESXDriver. (string value)
+# Username for connection to VMware ESX/VC host. Used only if
+# compute_driver is vmwareapi.VMwareESXDriver or
+# vmwareapi.VMwareVCDriver. (string value)
#vmwareapi_host_username=<None>
-# Password for connection to VMware ESX host. Used only if
-# compute_driver is vmwareapi.VMwareESXDriver. (string value)
+# Password for connection to VMware ESX/VC host. Used only if
+# compute_driver is vmwareapi.VMwareESXDriver or
+# vmwareapi.VMwareVCDriver. (string value)
#vmwareapi_host_password=<None>
+# Name of a VMware Cluster ComputeResource. Used only if
+# compute_driver is vmwareapi.VMwareVCDriver. (string value)
+#vmwareapi_cluster_name=<None>
+
# The interval used for polling of remote tasks. Used only if
-# compute_driver is vmwareapi.VMwareESXDriver. (floating point
-# value)
+# compute_driver is vmwareapi.VMwareESXDriver or
+# vmwareapi.VMwareVCDriver. (floating point value)
#vmwareapi_task_poll_interval=5.0
# The number of times we retry on failures, e.g., socket
# error, etc. Used only if compute_driver is
-# vmwareapi.VMwareESXDriver. (integer value)
+# vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.
+# (integer value)
#vmwareapi_api_retry_count=10
+# VNC starting port (integer value)
+#vnc_port=5900
+
+# Total number of VNC ports (integer value)
+#vnc_port_total=10000
+
+# VNC password (string value)
+#vnc_password=<None>
+
+# Whether to use linked clone (boolean value)
+#use_linked_clone=true
+
#
# Options defined in nova.virt.vmwareapi.vif
@@ -2234,6 +2293,10 @@
# value)
#xenapi_vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver
+# Object Store Driver used to handle image uploads. (string
+# value)
+#xenapi_image_upload_handler=nova.virt.xenapi.imageupload.glance.GlanceStore
+
#
# Options defined in nova.vnc
@@ -2413,6 +2476,14 @@
[baremetal]
#
+# Options defined in nova.virt.baremetal.db.api
+#
+
+# The backend to use for bare-metal database (string value)
+#db_backend=sqlalchemy
+
+
+#
# Options defined in nova.virt.baremetal.db.sqlalchemy.session
#
@@ -2542,6 +2613,19 @@
# attestation authorization blob - must change (string value)
#attestation_auth_blob=<None>
+# Attestation status cache valid period length (integer value)
+#attestation_auth_timeout=60
+
+
+[vmware]
+
+#
+# Options defined in nova.virt.vmwareapi.vmops
+#
+
+# Name of Integration Bridge (string value)
+#integration_bridge=br-int
+
[spice]
@@ -2571,4 +2655,4 @@
#keymap=en-us
-# Total option count: 525
+# Total option count: 543
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 97ae89a38..923c4a528 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -40,8 +40,10 @@
"compute_extension:createserverext": "",
"compute_extension:deferred_delete": "",
"compute_extension:disk_config": "",
+ "compute_extension:evacuate": "rule:admin_api",
"compute_extension:extended_server_attributes": "rule:admin_api",
"compute_extension:extended_status": "",
+ "compute_extension:extended_availability_zone": "",
"compute_extension:fixed_ips": "rule:admin_api",
"compute_extension:flavor_access": "",
"compute_extension:flavor_disabled": "",
@@ -83,6 +85,10 @@
"compute_extension:virtual_interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
+ "compute_extension:volume_attachments:index": "",
+ "compute_extension:volume_attachments:show": "",
+ "compute_extension:volume_attachments:create": "",
+ "compute_extension:volume_attachments:delete": "",
"compute_extension:volumetypes": "",
"compute_extension:availability_zone:list": "",
"compute_extension:availability_zone:detail": "rule:admin_api",
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index e1113a9e7..9562a23aa 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -174,3 +174,9 @@ vgs: CommandFilter, /sbin/vgs, root
# nova/virt/baremetal/volume_driver.py: 'tgtadm', '--lld', 'iscsi', ...
tgtadm: CommandFilter, /usr/sbin/tgtadm, root
+
+# nova/utils.py:read_file_as_root: 'cat', file_path
+# (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
+read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
+read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
+
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 48b0f632f..b66b15852 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -280,7 +280,7 @@ class CloudController(object):
host_services = {}
for service in enabled_services:
zone_hosts.setdefault(service['availability_zone'], [])
- if not service['host'] in zone_hosts[service['availability_zone']]:
+ if service['host'] not in zone_hosts[service['availability_zone']]:
zone_hosts[service['availability_zone']].append(
service['host'])
@@ -407,7 +407,7 @@ class CloudController(object):
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = self.keypair_api.get_key_pairs(context, context.user_id)
- if not key_name is None:
+ if key_name is not None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
#If looking for non existent key pair
@@ -527,7 +527,7 @@ class CloudController(object):
def _rule_args_to_dict(self, context, kwargs):
rules = []
- if not 'groups' in kwargs and not 'ip_ranges' in kwargs:
+ if 'groups' not in kwargs and 'ip_ranges' not in kwargs:
rule = self._rule_dict_last_step(context, **kwargs)
if rule:
rules.append(rule)
@@ -991,18 +991,22 @@ class CloudController(object):
def describe_instances(self, context, **kwargs):
# Optional DescribeInstances argument
instance_id = kwargs.get('instance_id', None)
+ filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
- instance_cache=instances)
+ instance_cache=instances,
+ filter=filters)
def describe_instances_v6(self, context, **kwargs):
# Optional DescribeInstancesV6 argument
instance_id = kwargs.get('instance_id', None)
+ filters = kwargs.get('filter', None)
instances = self._enforce_valid_instance_ids(context, instance_id)
return self._format_describe_instances(context,
instance_id=instance_id,
instance_cache=instances,
+ filter=filters,
use_v6=True)
def _format_describe_instances(self, context, **kwargs):
@@ -1545,11 +1549,11 @@ class CloudController(object):
if attribute != 'launchPermission':
raise exception.EC2APIError(_('attribute not supported: %s')
% attribute)
- if not 'user_group' in kwargs:
+ if 'user_group' not in kwargs:
raise exception.EC2APIError(_('user or group not specified'))
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
raise exception.EC2APIError(_('only group "all" is supported'))
- if not operation_type in ['add', 'remove']:
+ if operation_type not in ['add', 'remove']:
msg = _('operation_type must be add or remove')
raise exception.EC2APIError(msg)
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index fbb46930b..4a425f876 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -142,7 +142,7 @@ class MetadataRequestHandler(wsgi.Application):
def _handle_instance_id_request(self, req):
instance_id = req.headers.get('X-Instance-ID')
signature = req.headers.get('X-Instance-ID-Signature')
- remote_address = req.remote_addr
+ remote_address = req.headers.get('X-Forwarded-For')
# Ensure that only one header was passed
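
The one-line handler.py change above matters because the metadata API
usually sits behind a proxy, so the socket peer address is the proxy rather
than the instance itself. A minimal sketch; the fallback to req.remote_addr
is an assumption added for illustration, not part of the patch.

def client_address(req):
    """Return the originating client address for a metadata request."""
    # The patch reads X-Forwarded-For; falling back to the socket peer
    # address is an assumption covering the non-proxied case.
    return req.headers.get('X-Forwarded-For') or req.remote_addr
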
diff --git a/nova/api/metadata/password.py b/nova/api/metadata/password.py
index b2bb83b15..f3453e945 100644
--- a/nova/api/metadata/password.py
+++ b/nova/api/metadata/password.py
@@ -15,8 +15,9 @@
from webob import exc
+from nova import conductor
from nova import context
-from nova import db
+from nova import utils
CHUNKS = 4
@@ -33,7 +34,7 @@ def extract_password(instance):
return result or None
-def set_password(context, instance_uuid, password):
+def convert_password(context, password):
"""Stores password as system_metadata items.
Password is stored with the keys 'password_0' -> 'password_3'.
@@ -43,10 +44,7 @@ def set_password(context, instance_uuid, password):
for i in xrange(CHUNKS):
meta['password_%d' % i] = password[:CHUNK_LENGTH]
password = password[CHUNK_LENGTH:]
- db.instance_system_metadata_update(context,
- instance_uuid,
- meta,
- False)
+ return meta
def handle_password(req, meta_data):
@@ -63,6 +61,12 @@ def handle_password(req, meta_data):
if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE):
msg = _("Request is too large.")
raise exc.HTTPBadRequest(explanation=msg)
- set_password(ctxt, meta_data.uuid, req.body)
+
+ conductor_api = conductor.API()
+ instance = conductor_api.instance_get_by_uuid(ctxt, meta_data.uuid)
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ sys_meta.update(convert_password(ctxt, req.body))
+ conductor_api.instance_update(ctxt, meta_data.uuid,
+ system_metadata=sys_meta)
else:
raise exc.HTTPBadRequest()
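
To make the password.py refactor above concrete: convert_password now only
builds the four system_metadata chunks, and persistence goes through the
conductor. A standalone sketch; CHUNK_LENGTH is the module constant (255 in
the file, not visible in this hunk).

CHUNKS = 4
CHUNK_LENGTH = 255  # module constant, not shown in the hunk above


def convert_password(context, password):
    """Split a password into CHUNKS system_metadata items (sketch)."""
    password = password or ''
    meta = {}
    for i in range(CHUNKS):
        meta['password_%d' % i] = password[:CHUNK_LENGTH]
        password = password[CHUNK_LENGTH:]
    return meta

# convert_password(None, 'secret') ->
# {'password_0': 'secret', 'password_1': '', 'password_2': '', 'password_3': ''}
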
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index d812cef18..a76b74324 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -102,7 +102,7 @@ class APIMapper(routes.Mapper):
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
- if not ('parent_resource' in kwargs):
+ if 'parent_resource' not in kwargs:
kwargs['path_prefix'] = '{project_id}/'
else:
parent_resource = kwargs['parent_resource']
diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py
index 91d138be4..84b0358a3 100644
--- a/nova/api/openstack/compute/contrib/aggregates.py
+++ b/nova/api/openstack/compute/contrib/aggregates.py
@@ -106,7 +106,7 @@ class AggregateController(object):
raise exc.HTTPBadRequest
for key in updates.keys():
- if not key in ["name", "availability_zone"]:
+ if key not in ["name", "availability_zone"]:
raise exc.HTTPBadRequest
try:
diff --git a/nova/api/openstack/compute/contrib/availability_zone.py b/nova/api/openstack/compute/contrib/availability_zone.py
index 6cde5ca64..98c508bd7 100644
--- a/nova/api/openstack/compute/contrib/availability_zone.py
+++ b/nova/api/openstack/compute/contrib/availability_zone.py
@@ -110,7 +110,7 @@ class AvailabilityZoneController(wsgi.Controller):
host_services = {}
for service in enabled_services:
zone_hosts.setdefault(service['availability_zone'], [])
- if not service['host'] in zone_hosts[service['availability_zone']]:
+ if service['host'] not in zone_hosts[service['availability_zone']]:
zone_hosts[service['availability_zone']].append(
service['host'])
diff --git a/nova/api/openstack/compute/contrib/console_output.py b/nova/api/openstack/compute/contrib/console_output.py
index 953459d38..60594cb53 100644
--- a/nova/api/openstack/compute/contrib/console_output.py
+++ b/nova/api/openstack/compute/contrib/console_output.py
@@ -65,9 +65,11 @@ class ConsoleOutputController(wsgi.Controller):
length)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Unable to get console'))
+ except exception.InstanceNotReady as e:
+ raise webob.exc.HTTPConflict(explanation=unicode(e))
# XML output is not correctly escaped, so remove invalid characters
- remove_re = re.compile('[\x00-\x08\x0B-\x0C\x0E-\x1F]')
+ remove_re = re.compile('[\x00-\x08\x0B-\x1F]')
output = remove_re.sub('', output)
return {'output': output}
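
A quick check of the sanitising regex above: everything below 0x20 except
tab (\x09) and newline (\x0A) is stripped, now including carriage returns
(\x0D), so the XML serializer receives well-formed text.

import re

remove_re = re.compile('[\x00-\x08\x0B-\x1F]')  # same class as patched above
print(remove_re.sub('', 'line 1\r\nline \x072'))
# -> 'line 1\nline 2'
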
diff --git a/nova/api/openstack/compute/contrib/evacuate.py b/nova/api/openstack/compute/contrib/evacuate.py
new file mode 100644
index 000000000..4c9229d1e
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/evacuate.py
@@ -0,0 +1,98 @@
+# Copyright 2013 OpenStack, LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from webob import exc
+
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import compute
+from nova import exception
+from nova.openstack.common import log as logging
+from nova import utils
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'evacuate')
+
+
+class Controller(wsgi.Controller):
+ def __init__(self, *args, **kwargs):
+ super(Controller, self).__init__(*args, **kwargs)
+ self.compute_api = compute.API()
+
+ @wsgi.action('evacuate')
+ def _evacuate(self, req, id, body):
+ """
+ Permit admins to evacuate a server from a failed host
+ to a new one.
+ """
+ context = req.environ["nova.context"]
+ if not context.is_admin:
+ msg = _("Instance evacuate is admin only functionality")
+ raise exc.HTTPForbidden(explanation=msg)
+ authorize(context)
+
+ try:
+ if len(body) != 1:
+ raise exc.HTTPBadRequest(_("Malformed request body"))
+
+ evacuate_body = body["evacuate"]
+ host = evacuate_body["host"]
+ on_shared_storage = utils.bool_from_str(
+ evacuate_body["onSharedStorage"])
+
+ # ensure 'password' is always bound; it would otherwise be unset when
+ # evacuating to shared storage without an adminPass
+ password = None
+ if 'adminPass' in evacuate_body:
+ # check that if requested to evacuate server on shared storage
+ # password not specified
+ if on_shared_storage:
+ msg = _("admin password can't be changed on existing disk")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ password = evacuate_body['adminPass']
+ elif not on_shared_storage:
+ password = utils.generate_password()
+
+ except (TypeError, KeyError):
+ msg = _("host and onSharedStorage must be specified.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ instance = self.compute_api.get(context, id)
+ self.compute_api.evacuate(context, instance, host,
+ on_shared_storage, password)
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'evacuate')
+ except Exception as e:
+ msg = _("Error in evacuate, %s") % e
+ LOG.exception(msg, instance=instance)
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ if password:
+ return {'adminPass': password}
+
+
+class Evacuate(extensions.ExtensionDescriptor):
+ """Enables server evacuation."""
+
+ name = "Evacuate"
+ alias = "os-evacuate"
+ namespace = "http://docs.openstack.org/compute/ext/evacuate/api/v2"
+ updated = "2013-01-06T00:00:00+00:00"
+
+ def get_controller_extensions(self):
+ controller = Controller()
+ extension = extensions.ControllerExtension(self, 'servers', controller)
+ return [extension]
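
For orientation, the extension above adds an admin-only 'evacuate' server
action. A hedged client sketch using requests (an assumed HTTP client);
endpoint, token, and host are placeholders, and the body uses the keys the
controller above reads (host, onSharedStorage, adminPass).

import json
import requests  # assumed client; any HTTP library works

url = ('http://openstack.example.com/v2/openstack/servers/'
       '3f69b6bd-00a8-4636-96ee-650093624304/action')  # placeholder IDs
body = {'evacuate': {'host': 'target-compute',
                     'onSharedStorage': 'False',
                     'adminPass': 'MySecretPass'}}
resp = requests.post(url,
                     headers={'X-Auth-Token': 'ADMIN_TOKEN',
                              'Content-Type': 'application/json'},
                     data=json.dumps(body))
print(resp.status_code, resp.text)
# When onSharedStorage is false, the response body carries the adminPass.
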
diff --git a/nova/api/openstack/compute/contrib/extended_availability_zone.py b/nova/api/openstack/compute/contrib/extended_availability_zone.py
new file mode 100644
index 000000000..b7451cb6a
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/extended_availability_zone.py
@@ -0,0 +1,106 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Netease, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The Extended Availability Zone Status API extension."""
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import availability_zones
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+authorize = extensions.soft_extension_authorizer('compute',
+ 'extended_availability_zone')
+
+
+class ExtendedAZController(wsgi.Controller):
+
+ def _get_host_az(self, context, instance):
+ admin_context = context.elevated()
+ if instance['host']:
+ return availability_zones.get_host_availability_zone(
+ admin_context, instance['host'])
+
+ def _extend_server(self, context, server, instance):
+ key = "%s:availability_zone" % Extended_availability_zone.alias
+ server[key] = instance.get('availability_zone', None)
+
+ key = "%s:host_availability_zone" % Extended_availability_zone.alias
+ server[key] = self._get_host_az(context, instance)
+
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+ context = req.environ['nova.context']
+ if authorize(context):
+ resp_obj.attach(xml=ExtendedAZTemplate())
+ server = resp_obj.obj['server']
+ db_instance = req.get_db_instance(server['id'])
+ self._extend_server(context, server, db_instance)
+
+ @wsgi.extends
+ def detail(self, req, resp_obj):
+ context = req.environ['nova.context']
+ if authorize(context):
+ resp_obj.attach(xml=ExtendedAZsTemplate())
+ servers = list(resp_obj.obj['servers'])
+ for server in servers:
+ db_instance = req.get_db_instance(server['id'])
+ self._extend_server(context, server, db_instance)
+
+
+class Extended_availability_zone(extensions.ExtensionDescriptor):
+ """Extended Server Attributes support."""
+
+ name = "ExtendedAvailabilityZone"
+ alias = "OS-EXT-AZ"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "extended_availability_zone/api/v2")
+ updated = "2013-01-30T00:00:00+00:00"
+
+ def get_controller_extensions(self):
+ controller = ExtendedAZController()
+ extension = extensions.ControllerExtension(self, 'servers', controller)
+ return [extension]
+
+
+def make_server(elem):
+ elem.set('{%s}availability_zone' % Extended_availability_zone.namespace,
+ '%s:availability_zone' % Extended_availability_zone.alias)
+ elem.set('{%s}host_availability_zone' %
+ Extended_availability_zone.namespace,
+ '%s:host_availability_zone' %
+ Extended_availability_zone.alias)
+
+
+class ExtendedAZTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('server', selector='server')
+ make_server(root)
+ alias = Extended_availability_zone.alias
+ namespace = Extended_availability_zone.namespace
+ return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
+
+
+class ExtendedAZsTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ root = xmlutil.TemplateElement('servers')
+ elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
+ make_server(elem)
+ alias = Extended_availability_zone.alias
+ namespace = Extended_availability_zone.namespace
+ return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
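
Net effect on API responses, for reference: when the policy check passes,
each server body gains two alias-prefixed keys built by _extend_server
above. The zone values below are placeholders.

server = {'id': '3f69b6bd-00a8-4636-96ee-650093624304'}  # placeholder
server['OS-EXT-AZ:availability_zone'] = 'nova'            # from the instance
server['OS-EXT-AZ:host_availability_zone'] = 'nova'       # from its host
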
diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py
index 1c5006576..744fea13b 100644
--- a/nova/api/openstack/compute/contrib/flavor_access.py
+++ b/nova/api/openstack/compute/contrib/flavor_access.py
@@ -39,7 +39,7 @@ def make_flavor_access(elem):
elem.set('tenant_id')
-class FlavorextradatumTemplate(xmlutil.TemplateBuilder):
+class FlavorTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavor', selector='flavor')
make_flavor(root)
@@ -48,7 +48,7 @@ class FlavorextradatumTemplate(xmlutil.TemplateBuilder):
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
-class FlavorextradataTemplate(xmlutil.TemplateBuilder):
+class FlavorsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('flavors')
elem = xmlutil.SubTemplateElement(root, 'flavor', selector='flavors')
@@ -60,11 +60,7 @@ class FlavorextradataTemplate(xmlutil.TemplateBuilder):
class FlavorAccessTemplate(xmlutil.TemplateBuilder):
def construct(self):
- def wrapped(obj, do_raise=False):
- # wrap bare list in dict
- return dict(flavor_access=obj)
-
- root = xmlutil.TemplateElement('flavor_access', selector=wrapped)
+ root = xmlutil.TemplateElement('flavor_access')
elem = xmlutil.SubTemplateElement(root, 'access',
selector='flavor_access')
make_flavor_access(elem)
@@ -138,7 +134,7 @@ class FlavorActionController(wsgi.Controller):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
- resp_obj.attach(xml=FlavorextradatumTemplate())
+ resp_obj.attach(xml=FlavorTemplate())
db_flavor = req.get_db_flavor(id)
self._extend_flavor(resp_obj.obj['flavor'], db_flavor)
@@ -148,7 +144,7 @@ class FlavorActionController(wsgi.Controller):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
- resp_obj.attach(xml=FlavorextradataTemplate())
+ resp_obj.attach(xml=FlavorsTemplate())
flavors = list(resp_obj.obj['flavors'])
for flavor_rval in flavors:
@@ -160,7 +156,7 @@ class FlavorActionController(wsgi.Controller):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
- resp_obj.attach(xml=FlavorextradatumTemplate())
+ resp_obj.attach(xml=FlavorTemplate())
db_flavor = req.get_db_flavor(resp_obj.obj['flavor']['id'])
diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py
index c8deb7b4c..1349abe78 100644
--- a/nova/api/openstack/compute/contrib/flavorextraspecs.py
+++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py
@@ -34,6 +34,15 @@ class ExtraSpecsTemplate(xmlutil.TemplateBuilder):
return xmlutil.MasterTemplate(xmlutil.make_flat_dict('extra_specs'), 1)
+class ExtraSpecTemplate(xmlutil.TemplateBuilder):
+ def construct(self):
+ sel = xmlutil.Selector(xmlutil.get_items, 0)
+ root = xmlutil.TemplateElement('extra_spec', selector=sel)
+ root.set('key', 0)
+ root.text = 1
+ return xmlutil.MasterTemplate(root, 1)
+
+
class FlavorExtraSpecsController(object):
"""The flavor extra specs API controller for the OpenStack API."""
@@ -53,13 +62,13 @@ class FlavorExtraSpecsController(object):
def index(self, req, flavor_id):
"""Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
- authorize(context)
+ authorize(context, action='index')
return self._get_extra_specs(context, flavor_id)
@wsgi.serializers(xml=ExtraSpecsTemplate)
def create(self, req, flavor_id, body):
context = req.environ['nova.context']
- authorize(context)
+ authorize(context, action='create')
self._check_body(body)
specs = body.get('extra_specs')
try:
@@ -70,12 +79,12 @@ class FlavorExtraSpecsController(object):
raise exc.HTTPBadRequest(explanation=unicode(error))
return body
- @wsgi.serializers(xml=ExtraSpecsTemplate)
+ @wsgi.serializers(xml=ExtraSpecTemplate)
def update(self, req, flavor_id, id, body):
context = req.environ['nova.context']
- authorize(context)
+ authorize(context, action='update')
self._check_body(body)
- if not id in body:
+ if id not in body:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(body) > 1:
@@ -87,14 +96,13 @@ class FlavorExtraSpecsController(object):
body)
except exception.MetadataLimitExceeded as error:
raise exc.HTTPBadRequest(explanation=unicode(error))
-
return body
- @wsgi.serializers(xml=ExtraSpecsTemplate)
+ @wsgi.serializers(xml=ExtraSpecTemplate)
def show(self, req, flavor_id, id):
"""Return a single extra spec item."""
context = req.environ['nova.context']
- authorize(context)
+ authorize(context, action='show')
specs = self._get_extra_specs(context, flavor_id)
if id in specs['extra_specs']:
return {id: specs['extra_specs'][id]}
@@ -104,7 +112,7 @@ class FlavorExtraSpecsController(object):
def delete(self, req, flavor_id, id):
"""Deletes an existing extra spec."""
context = req.environ['nova.context']
- authorize(context)
+ authorize(context, action='delete')
db.instance_type_extra_specs_delete(context, flavor_id, id)
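
Side note on the new ExtraSpecTemplate above: xmlutil.Selector(xmlutil.get_items, 0) selects the first (key, value) item of the single-entry body returned by show()/update(), and the template emits it as the element's 'key' attribute and text. A minimal standalone sketch of that selection (the body value is illustrative, not from the patch):

    body = {'key1': 'value1'}
    key, value = list(body.items())[0]
    # rendered as: <extra_spec key="key1">value1</extra_spec>
    assert (key, value) == ('key1', 'value1')
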
diff --git a/nova/api/openstack/compute/contrib/floating_ip_dns.py b/nova/api/openstack/compute/contrib/floating_ip_dns.py
index fbea0acf9..bddf3580c 100644
--- a/nova/api/openstack/compute/contrib/floating_ip_dns.py
+++ b/nova/api/openstack/compute/contrib/floating_ip_dns.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License
+import socket
import urllib
import webob
@@ -206,32 +207,40 @@ class FloatingIPDNSEntryController(object):
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
- name = id
- entries = self.network_api.get_dns_entries_by_name(context,
- name, domain)
- entry = _create_dns_entry(entries[0], name, domain)
- return _translate_dns_entry_view(entry)
-
- @wsgi.serializers(xml=FloatingIPDNSsTemplate)
- def index(self, req, domain_id):
- """Return a list of dns entries for the specified domain and ip."""
- context = req.environ['nova.context']
- authorize(context)
- params = req.GET
- floating_ip = params.get('ip')
- domain = _unquote_domain(domain_id)
+ floating_ip = None
+ # Check whether id is a valid ipv4/ipv6 address.
+ try:
+ socket.inet_pton(socket.AF_INET, id)
+ floating_ip = id
+ except socket.error:
+ try:
+ socket.inet_pton(socket.AF_INET6, id)
+ floating_ip = id
+ except socket.error:
+ pass
+
+ if floating_ip:
+ entries = self.network_api.get_dns_entries_by_address(context,
+ floating_ip,
+ domain)
+ else:
+ entries = self.network_api.get_dns_entries_by_name(context, id,
+ domain)
- if not floating_ip:
- raise webob.exc.HTTPUnprocessableEntity()
+ if not entries:
+ explanation = _("DNS entries not found.")
+ raise webob.exc.HTTPNotFound(explanation=explanation)
- entries = self.network_api.get_dns_entries_by_address(context,
- floating_ip,
- domain)
- entrylist = [_create_dns_entry(floating_ip, entry, domain)
- for entry in entries]
+ if floating_ip:
+ entrylist = [_create_dns_entry(floating_ip, entry, domain)
+ for entry in entries]
+ dns_entries = _translate_dns_entries_view(entrylist)
+ return wsgi.ResponseObject(dns_entries,
+ xml=FloatingIPDNSsTemplate)
- return _translate_dns_entries_view(entrylist)
+ entry = _create_dns_entry(entries[0], id, domain)
+ return _translate_dns_entry_view(entry)
@wsgi.serializers(xml=FloatingIPDNSTemplate)
def update(self, req, domain_id, id, body):
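
The reworked show() above now serves double duty: if the id parses as an IPv4/IPv6 address it looks up entries by address, otherwise by name. A standalone sketch of the validity check it relies on (socket.inet_pton raises socket.error for anything that is not a literal address):

    import socket

    def is_ip_address(text):
        # Try IPv4 first, then IPv6; inet_pton raises on invalid input.
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                socket.inet_pton(family, text)
                return True
            except socket.error:
                continue
        return False

    assert is_ip_address('192.168.0.1')
    assert is_ip_address('2001:db8::1')
    assert not is_ip_address('www.example.com')
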
diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py
index 3f00136f5..f7c1100a5 100644
--- a/nova/api/openstack/compute/contrib/floating_ips.py
+++ b/nova/api/openstack/compute/contrib/floating_ips.py
@@ -67,11 +67,11 @@ def _translate_floating_ip_view(floating_ip):
}
try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
- except (TypeError, KeyError):
+ except (TypeError, KeyError, AttributeError):
result['fixed_ip'] = None
try:
result['instance_id'] = floating_ip['instance']['uuid']
- except (TypeError, KeyError):
+ except (TypeError, KeyError, AttributeError):
result['instance_id'] = None
return {'floating_ip': result}
@@ -109,33 +109,14 @@ class FloatingIPController(object):
self.network_api = network.API()
super(FloatingIPController, self).__init__()
- def _get_fixed_ip(self, context, fixed_ip_id):
- if fixed_ip_id is None:
- return None
- try:
- return self.network_api.get_fixed_ip(context, fixed_ip_id)
- except exception.FixedIpNotFound:
- return None
-
- def _get_instance(self, context, instance_id):
- return self.compute_api.get(context, instance_id)
-
- def _set_metadata(self, context, floating_ip):
- # When Quantum v2 API is used, 'fixed_ip' and 'instance' are
- # already set. In this case we don't need to update the fields.
-
- if 'fixed_ip' not in floating_ip:
- fixed_ip_id = floating_ip['fixed_ip_id']
- floating_ip['fixed_ip'] = self._get_fixed_ip(context,
- fixed_ip_id)
+ def _normalize_ip(self, floating_ip):
+ # NOTE(vish): translate expects instance to be in the floating_ip
+ # dict but it is returned in the fixed_ip dict by
+ # nova-network
+ fixed_ip = floating_ip.get('fixed_ip')
if 'instance' not in floating_ip:
- instance_uuid = None
- if floating_ip['fixed_ip']:
- instance_uuid = floating_ip['fixed_ip']['instance_uuid']
-
- if instance_uuid:
- floating_ip['instance'] = self._get_instance(context,
- instance_uuid)
+ if fixed_ip:
+ floating_ip['instance'] = fixed_ip['instance']
else:
floating_ip['instance'] = None
@@ -151,7 +132,7 @@ class FloatingIPController(object):
msg = _("Floating ip not found for id %s") % id
raise webob.exc.HTTPNotFound(explanation=msg)
- self._set_metadata(context, floating_ip)
+ self._normalize_ip(floating_ip)
return _translate_floating_ip_view(floating_ip)
@@ -164,7 +145,7 @@ class FloatingIPController(object):
floating_ips = self.network_api.get_floating_ips_by_project(context)
for floating_ip in floating_ips:
- self._set_metadata(context, floating_ip)
+ self._normalize_ip(floating_ip)
return _translate_floating_ips_view(floating_ips)
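
Per the NOTE in _normalize_ip() above, nova-network nests the instance inside the fixed_ip dict rather than on the floating ip itself; the helper just hoists it. A standalone sketch with illustrative data (the dict shapes here are an assumption):

    floating_ip = {'address': '172.24.4.1',
                   'fixed_ip': {'address': '10.0.0.2',
                                'instance': {'uuid': 'abc'}}}
    fixed_ip = floating_ip.get('fixed_ip')
    if 'instance' not in floating_ip:
        floating_ip['instance'] = fixed_ip['instance'] if fixed_ip else None
    assert floating_ip['instance'] == {'uuid': 'abc'}
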
diff --git a/nova/api/openstack/compute/contrib/floating_ips_bulk.py b/nova/api/openstack/compute/contrib/floating_ips_bulk.py
index f5b8d24dd..11ab0ec69 100644
--- a/nova/api/openstack/compute/contrib/floating_ips_bulk.py
+++ b/nova/api/openstack/compute/contrib/floating_ips_bulk.py
@@ -25,7 +25,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import log as logging
CONF = cfg.CONF
-CONF.import_opt('default_floating_pool', 'nova.network.manager')
+CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('public_interface', 'nova.network.linux_net')
@@ -80,13 +80,13 @@ class FloatingIPBulkController(object):
context = req.environ['nova.context']
authorize(context)
- if not 'floating_ips_bulk_create' in body:
+ if 'floating_ips_bulk_create' not in body:
raise webob.exc.HTTPUnprocessableEntity()
params = body['floating_ips_bulk_create']
LOG.debug(params)
- if not 'ip_range' in params:
+ if 'ip_range' not in params:
raise webob.exc.HTTPUnprocessableEntity()
ip_range = params['ip_range']
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index d1b39d6db..9ce278900 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -124,11 +124,12 @@ class HostController(object):
"""
context = req.environ['nova.context']
authorize(context)
- filters = {}
+ filters = {'disabled': False}
zone = req.GET.get('zone', None)
if zone:
filters['availability_zone'] = zone
- services = self.api.service_get_all(context, filters=filters)
+ services = self.api.service_get_all(context, filters=filters,
+ set_zones=True)
hosts = []
for service in services:
hosts.append({'host_name': service['host'],
diff --git a/nova/api/openstack/compute/contrib/hypervisors.py b/nova/api/openstack/compute/contrib/hypervisors.py
index 6580212a9..7e477bbf3 100644
--- a/nova/api/openstack/compute/contrib/hypervisors.py
+++ b/nova/api/openstack/compute/contrib/hypervisors.py
@@ -20,8 +20,7 @@ import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova.compute import api as compute_api
-from nova import db
+from nova import compute
from nova import exception
from nova.openstack.common import log as logging
@@ -128,7 +127,7 @@ class HypervisorsController(object):
"""The Hypervisors API controller for the OpenStack API."""
def __init__(self):
- self.api = compute_api.HostAPI()
+ self.host_api = compute.HostAPI()
super(HypervisorsController, self).__init__()
def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
@@ -164,22 +163,24 @@ class HypervisorsController(object):
def index(self, req):
context = req.environ['nova.context']
authorize(context)
+ compute_nodes = self.host_api.compute_node_get_all(context)
return dict(hypervisors=[self._view_hypervisor(hyp, False)
- for hyp in db.compute_node_get_all(context)])
+ for hyp in compute_nodes])
@wsgi.serializers(xml=HypervisorDetailTemplate)
def detail(self, req):
context = req.environ['nova.context']
authorize(context)
+ compute_nodes = self.host_api.compute_node_get_all(context)
return dict(hypervisors=[self._view_hypervisor(hyp, True)
- for hyp in db.compute_node_get_all(context)])
+ for hyp in compute_nodes])
@wsgi.serializers(xml=HypervisorTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
- hyp = db.compute_node_get(context, int(id))
+ hyp = self.host_api.compute_node_get(context, id)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
@@ -190,7 +191,7 @@ class HypervisorsController(object):
context = req.environ['nova.context']
authorize(context)
try:
- hyp = db.compute_node_get(context, int(id))
+ hyp = self.host_api.compute_node_get(context, id)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
@@ -198,7 +199,7 @@ class HypervisorsController(object):
# Get the uptime
try:
host = hyp['service']['host']
- uptime = self.api.get_host_uptime(context, host)
+ uptime = self.host_api.get_host_uptime(context, host)
except NotImplementedError:
msg = _("Virt driver does not implement uptime function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
@@ -210,7 +211,8 @@ class HypervisorsController(object):
def search(self, req, id):
context = req.environ['nova.context']
authorize(context)
- hypervisors = db.compute_node_search_by_hypervisor(context, id)
+ hypervisors = self.host_api.compute_node_search_by_hypervisor(
+ context, id)
if hypervisors:
return dict(hypervisors=[self._view_hypervisor(hyp, False)
for hyp in hypervisors])
@@ -222,21 +224,24 @@ class HypervisorsController(object):
def servers(self, req, id):
context = req.environ['nova.context']
authorize(context)
- hypervisors = db.compute_node_search_by_hypervisor(context, id)
- if hypervisors:
- return dict(hypervisors=[self._view_hypervisor(hyp, False,
- db.instance_get_all_by_host(context,
- hyp['service']['host']))
- for hyp in hypervisors])
- else:
+ compute_nodes = self.host_api.compute_node_search_by_hypervisor(
+ context, id)
+ if not compute_nodes:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
+ hypervisors = []
+ for compute_node in compute_nodes:
+ instances = self.host_api.instance_get_all_by_host(context,
+ compute_node['service']['host'])
+ hyp = self._view_hypervisor(compute_node, False, instances)
+ hypervisors.append(hyp)
+ return dict(hypervisors=hypervisors)
@wsgi.serializers(xml=HypervisorStatisticsTemplate)
def statistics(self, req):
context = req.environ['nova.context']
authorize(context)
- stats = db.compute_node_statistics(context)
+ stats = self.host_api.compute_node_statistics(context)
return dict(hypervisor_statistics=stats)
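
Why show() and uptime() stopped casting id to int: with the cells-aware HostAPI, hypervisor IDs arrive as strings of the form '<cell>@<id>' (see nova/cells/utils.py below), while the plain HostAPI still performs the int() cast itself (see compute_node_get() in nova/compute/api.py later in this patch). A sketch of the split, assuming the '@' separator defined in cells/utils.py:

    compute_id = 'parent!child@3'
    cell_name, node_id = compute_id.rsplit('@', 1)
    assert (cell_name, node_id) == ('parent!child', '3')
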
diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
index 4b0afdecf..512b58869 100644
--- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
+++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
@@ -21,7 +21,7 @@ import datetime
import webob.exc
from nova.api.openstack import extensions
-from nova import db
+from nova import compute
from nova.openstack.common import cfg
from nova import utils
@@ -34,6 +34,8 @@ authorize = extensions.extension_authorizer('compute',
class InstanceUsageAuditLogController(object):
+ def __init__(self):
+ self.host_api = compute.HostAPI()
def index(self, req):
context = req.environ['nova.context']
@@ -78,12 +80,13 @@ class InstanceUsageAuditLogController(object):
begin = defbegin
if end is None:
end = defend
- task_logs = db.task_log_get_all(context, "instance_usage_audit",
- begin, end)
+ task_logs = self.host_api.task_log_get_all(context,
+ "instance_usage_audit",
+ begin, end)
# We do it this way to include disabled compute services,
# which can have instances on them. (mdragon)
- services = [svc for svc in db.service_get_all(context)
- if svc['topic'] == CONF.compute_topic]
+ filters = {'topic': CONF.compute_topic}
+ services = self.host_api.service_get_all(context, filters=filters)
hosts = set(serv['host'] for serv in services)
seen_hosts = set()
done_hosts = set()
diff --git a/nova/api/openstack/compute/contrib/server_password.py b/nova/api/openstack/compute/contrib/server_password.py
index 0fd620fb8..9436d354f 100644
--- a/nova/api/openstack/compute/contrib/server_password.py
+++ b/nova/api/openstack/compute/contrib/server_password.py
@@ -24,6 +24,7 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
+from nova import db
from nova import exception
@@ -62,7 +63,9 @@ class ServerPasswordController(object):
context = req.environ['nova.context']
authorize(context)
instance = self._get_instance(context, server_id)
- password.set_password(context, instance['uuid'], None)
+ meta = password.convert_password(context, None)
+ db.instance_system_metadata_update(context, instance['uuid'],
+ meta, False)
class Server_password(extensions.ExtensionDescriptor):
diff --git a/nova/api/openstack/compute/contrib/server_start_stop.py b/nova/api/openstack/compute/contrib/server_start_stop.py
index 733972083..a13aabb05 100644
--- a/nova/api/openstack/compute/contrib/server_start_stop.py
+++ b/nova/api/openstack/compute/contrib/server_start_stop.py
@@ -44,7 +44,10 @@ class ServerStartStopActionController(wsgi.Controller):
context = req.environ['nova.context']
instance = self._get_instance(context, id)
LOG.debug(_('start instance'), instance=instance)
- self.compute_api.start(context, instance)
+ try:
+ self.compute_api.start(context, instance)
+ except exception.InstanceNotReady as e:
+ raise webob.exc.HTTPConflict(explanation=unicode(e))
return webob.Response(status_int=202)
@wsgi.action('os-stop')
@@ -53,7 +56,10 @@ class ServerStartStopActionController(wsgi.Controller):
context = req.environ['nova.context']
instance = self._get_instance(context, id)
LOG.debug(_('stop instance'), instance=instance)
- self.compute_api.stop(context, instance)
+ try:
+ self.compute_api.stop(context, instance)
+ except exception.InstanceNotReady as e:
+ raise webob.exc.HTTPConflict(explanation=unicode(e))
return webob.Response(status_int=202)
diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
index 8502e93c4..2313c00ac 100644
--- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py
+++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
@@ -159,7 +159,7 @@ class SimpleTenantUsageController(object):
info['uptime'] = delta.days * 24 * 3600 + delta.seconds
- if not info['tenant_id'] in rval:
+ if info['tenant_id'] not in rval:
summary = {}
summary['tenant_id'] = info['tenant_id']
if detailed:
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 47c717495..0ab93db35 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -33,6 +33,9 @@ from nova import volume
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'volumes')
+authorize_attach = extensions.extension_authorizer('compute',
+ 'volume_attachments')
+
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
@@ -329,6 +332,8 @@ class VolumeAttachmentController(wsgi.Controller):
@wsgi.serializers(xml=VolumeAttachmentsTemplate)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
+ context = req.environ['nova.context']
+ authorize_attach(context, action='index')
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@@ -337,6 +342,7 @@ class VolumeAttachmentController(wsgi.Controller):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
authorize(context)
+ authorize_attach(context, action='show')
volume_id = id
try:
@@ -377,6 +383,7 @@ class VolumeAttachmentController(wsgi.Controller):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
authorize(context)
+ authorize_attach(context, action='create')
if not self.is_valid_body(body, 'volumeAttachment'):
raise exc.HTTPUnprocessableEntity()
@@ -423,6 +430,7 @@ class VolumeAttachmentController(wsgi.Controller):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
authorize(context)
+ authorize_attach(context, action='delete')
volume_id = id
LOG.audit(_("Detach volume %s"), volume_id, context=context)
diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py
index 1a467f3a7..7e78d6324 100644
--- a/nova/api/openstack/compute/image_metadata.py
+++ b/nova/api/openstack/compute/image_metadata.py
@@ -76,7 +76,7 @@ class Controller(object):
expl = _('Incorrect request body format')
raise exc.HTTPBadRequest(explanation=expl)
- if not id in meta:
+ if id not in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(meta) > 1:
@@ -105,7 +105,7 @@ class Controller(object):
def delete(self, req, image_id, id):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
- if not id in image['properties']:
+ if id not in image['properties']:
msg = _("Invalid metadata key")
raise exc.HTTPNotFound(explanation=msg)
image['properties'].pop(id)
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 93a07ec3f..723997eba 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -605,8 +605,7 @@ class Controller(wsgi.Controller):
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
- contents = self._decode_base64(contents)
- if contents is None:
+ if self._decode_base64(contents) is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
@@ -751,7 +750,7 @@ class Controller(wsgi.Controller):
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
- if not 'name' in server_dict:
+ if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
@@ -911,6 +910,8 @@ class Controller(wsgi.Controller):
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
+ except exception.ImageNotActive as error:
+ raise exc.HTTPBadRequest(explanation=unicode(error))
except exception.FlavorNotFound as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
@@ -1170,7 +1171,7 @@ class Controller(wsgi.Controller):
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if (not 'changePassword' in body
- or not 'adminPass' in body['changePassword']):
+ or 'adminPass' not in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = body['changePassword']['adminPass']
@@ -1178,7 +1179,11 @@ class Controller(wsgi.Controller):
msg = _("Invalid adminPass")
raise exc.HTTPBadRequest(explanation=msg)
server = self._get_server(context, req, id)
- self.compute_api.set_admin_password(context, server, password)
+ try:
+ self.compute_api.set_admin_password(context, server, password)
+ except NotImplementedError:
+ msg = _("Unable to set password on instance")
+ raise exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
def _validate_metadata(self, metadata):
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index a94065ab0..94ce76ec7 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -386,12 +386,15 @@ def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
def extension_authorizer(api_name, extension_name):
- def authorize(context, target=None):
+ def authorize(context, target=None, action=None):
if target is None:
target = {'project_id': context.project_id,
'user_id': context.user_id}
- action = '%s_extension:%s' % (api_name, extension_name)
- nova.policy.enforce(context, action, target)
+ if action is None:
+ act = '%s_extension:%s' % (api_name, extension_name)
+ else:
+ act = '%s_extension:%s:%s' % (api_name, extension_name, action)
+ nova.policy.enforce(context, act, target)
return authorize
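
Effect of the new action argument on the enforced policy key, mirroring the string construction above (the extension name used here is illustrative):

    api_name, extension_name = 'compute', 'flavorextraspecs'
    # action=None keeps the legacy coarse-grained key:
    assert ('%s_extension:%s' % (api_name, extension_name)
            == 'compute_extension:flavorextraspecs')
    # With an action, a finer-grained per-operation key is checked:
    assert ('%s_extension:%s:%s' % (api_name, extension_name, 'show')
            == 'compute_extension:flavorextraspecs:show')
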
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index a6f255081..f68eff2a7 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -150,7 +150,7 @@ class Request(webob.Request):
Does not do any body introspection, only checks header
"""
- if not "Content-Type" in self.headers:
+ if "Content-Type" not in self.headers:
return None
content_type = self.content_type
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
index 09cbd98b8..97faccc9f 100644
--- a/nova/availability_zones.py
+++ b/nova/availability_zones.py
@@ -17,7 +17,6 @@
from nova import db
from nova.openstack.common import cfg
-from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
availability_zone_opts = [
@@ -72,7 +71,7 @@ def get_availability_zones(context):
available_zones = []
for zone in [service['availability_zone'] for service
in enabled_services]:
- if not zone in available_zones:
+ if zone not in available_zones:
available_zones.append(zone)
not_available_zones = []
diff --git a/nova/block_device.py b/nova/block_device.py
index c95961911..7d43d15cb 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -17,8 +17,11 @@
import re
+from nova.openstack.common import log as logging
from nova.virt import driver
+LOG = logging.getLogger(__name__)
+
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
'ephemeral0': 'sda2',
@@ -144,3 +147,22 @@ def match_device(device):
if not match:
return None
return match.groups()
+
+
+def volume_in_mapping(mount_device, block_device_info):
+ block_device_list = [strip_dev(vol['mount_device'])
+ for vol in
+ driver.block_device_info_get_mapping(
+ block_device_info)]
+
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ block_device_list.append(strip_dev(swap['device_name']))
+
+ block_device_list += [strip_dev(ephemeral['device_name'])
+ for ephemeral in
+ driver.block_device_info_get_ephemerals(
+ block_device_info)]
+
+ LOG.debug(_("block_device_list %s"), block_device_list)
+ return strip_dev(mount_device) in block_device_list
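
A standalone sketch of the membership check volume_in_mapping() performs, assuming the usual block_device_info layout (the dict shape below is an assumption, not part of the patch); strip_dev mirrors nova.block_device.strip_dev:

    def strip_dev(device_name):
        # Drop a leading '/dev/' so '/dev/vdb' and 'vdb' compare equal.
        prefix = '/dev/'
        if device_name.startswith(prefix):
            return device_name[len(prefix):]
        return device_name

    block_device_info = {
        'block_device_mapping': [{'mount_device': '/dev/vdb'}],
        'swap': {'device_name': '/dev/vdc', 'swap_size': 1024},
        'ephemerals': [{'device_name': '/dev/vdd'}],
    }
    devices = [strip_dev(v['mount_device'])
               for v in block_device_info['block_device_mapping']]
    devices.append(strip_dev(block_device_info['swap']['device_name']))
    devices += [strip_dev(e['device_name'])
                for e in block_device_info['ephemerals']]
    assert strip_dev('/dev/vdb') in devices
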
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index 133946794..c07a23ebb 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -65,7 +65,7 @@ class CellsManager(manager.Manager):
Scheduling requests get passed to the scheduler class.
"""
- RPC_API_VERSION = '1.1'
+ RPC_API_VERSION = '1.4'
def __init__(self, *args, **kwargs):
# Mostly for tests.
@@ -229,3 +229,105 @@ class CellsManager(manager.Manager):
"""
self.msg_runner.sync_instances(ctxt, project_id, updated_since,
deleted)
+
+ def service_get_all(self, ctxt, filters):
+ """Return services in this cell and in all child cells."""
+ responses = self.msg_runner.service_get_all(ctxt, filters)
+ ret_services = []
+ # 1 response per cell. Each response is a list of services.
+ for response in responses:
+ services = response.value_or_raise()
+ for service in services:
+ cells_utils.add_cell_to_service(service, response.cell_name)
+ ret_services.append(service)
+ return ret_services
+
+ def service_get_by_compute_host(self, ctxt, host_name):
+ """Return a service entry for a compute host in a certain cell."""
+ cell_name, host_name = cells_utils.split_cell_and_item(host_name)
+ response = self.msg_runner.service_get_by_compute_host(ctxt,
+ cell_name,
+ host_name)
+ service = response.value_or_raise()
+ cells_utils.add_cell_to_service(service, response.cell_name)
+ return service
+
+ def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
+ """Proxy an RPC message as-is to a manager."""
+ compute_topic = CONF.compute_topic
+ cell_and_host = topic[len(compute_topic) + 1:]
+ cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
+ response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
+ host_name, topic, rpc_message, call, timeout)
+ return response.value_or_raise()
+
+ def task_log_get_all(self, ctxt, task_name, period_beginning,
+ period_ending, host=None, state=None):
+ """Get task logs from the DB from all cells or a particular
+ cell.
+
+ If 'host' is not None, host will be of the format 'cell!name@host',
+ with '@host' being optional. The query will be directed to the
+ appropriate cell and return all task logs, or task logs matching
+ the host if specified.
+
+ 'state' also may be None. If it's not, filter by the state as well.
+ """
+ if host is None:
+ cell_name = None
+ else:
+ result = cells_utils.split_cell_and_item(host)
+ cell_name = result[0]
+ if len(result) > 1:
+ host = result[1]
+ else:
+ host = None
+ responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
+ task_name, period_beginning, period_ending,
+ host=host, state=state)
+ # 1 response per cell. Each response is a list of task log
+ # entries.
+ ret_task_logs = []
+ for response in responses:
+ task_logs = response.value_or_raise()
+ for task_log in task_logs:
+ cells_utils.add_cell_to_task_log(task_log,
+ response.cell_name)
+ ret_task_logs.append(task_log)
+ return ret_task_logs
+
+ def compute_node_get(self, ctxt, compute_id):
+ """Get a compute node by ID in a specific cell."""
+ cell_name, compute_id = cells_utils.split_cell_and_item(
+ compute_id)
+ response = self.msg_runner.compute_node_get(ctxt, cell_name,
+ compute_id)
+ node = response.value_or_raise()
+ cells_utils.add_cell_to_compute_node(node, cell_name)
+ return node
+
+ def compute_node_get_all(self, ctxt, hypervisor_match=None):
+ """Return list of compute nodes in all cells."""
+ responses = self.msg_runner.compute_node_get_all(ctxt,
+ hypervisor_match=hypervisor_match)
+ # 1 response per cell. Each response is a list of compute_node
+ # entries.
+ ret_nodes = []
+ for response in responses:
+ nodes = response.value_or_raise()
+ for node in nodes:
+ cells_utils.add_cell_to_compute_node(node,
+ response.cell_name)
+ ret_nodes.append(node)
+ return ret_nodes
+
+ def compute_node_stats(self, ctxt):
+ """Return compute node stats totals from all cells."""
+ responses = self.msg_runner.compute_node_stats(ctxt)
+ totals = {}
+ for response in responses:
+ data = response.value_or_raise()
+ for key, val in data.iteritems():
+ totals.setdefault(key, 0)
+ totals[key] += val
+ return totals
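
The totals loop in compute_node_stats() above is a plain per-key sum across one response per cell; a sketch with illustrative data:

    responses = [{'count': 2, 'vcpus': 16}, {'count': 1, 'vcpus': 8}]
    totals = {}
    for data in responses:
        for key, val in data.items():
            totals.setdefault(key, 0)
            totals[key] += val
    assert totals == {'count': 3, 'vcpus': 24}
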
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 34ca74855..5c7247085 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -37,6 +37,7 @@ from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
@@ -60,7 +61,7 @@ LOG = logging.getLogger(__name__)
# Separator used between cell names for the 'full cell name' and routing
# path.
-_PATH_CELL_SEP = '!'
+_PATH_CELL_SEP = cells_utils._PATH_CELL_SEP
def _reverse_path(path):
@@ -599,6 +600,22 @@ class _BaseMessageMethods(base.Base):
self.state_manager = msg_runner.state_manager
self.compute_api = compute.API()
+ def task_log_get_all(self, message, task_name, period_beginning,
+ period_ending, host, state):
+ """Get task logs from the DB. The message could have
+ directly targeted this cell, or it could have been a broadcast
+ message.
+
+ If 'host' is not None, filter by host.
+ If 'state' is not None, filter by state.
+ """
+ task_logs = self.db.task_log_get_all(message.ctxt, task_name,
+ period_beginning,
+ period_ending,
+ host=host,
+ state=state)
+ return jsonutils.to_primitive(task_logs)
+
class _ResponseMessageMethods(_BaseMessageMethods):
"""Methods that are called from a ResponseMessage. There's only
@@ -678,6 +695,28 @@ class _TargetedMessageMethods(_BaseMessageMethods):
"""
self.msg_runner.tell_parents_our_capacities(message.ctxt)
+ def service_get_by_compute_host(self, message, host_name):
+ """Return the service entry for a compute host."""
+ service = self.db.service_get_by_compute_host(message.ctxt,
+ host_name)
+ return jsonutils.to_primitive(service)
+
+ def proxy_rpc_to_manager(self, message, host_name, rpc_message,
+ topic, timeout):
+ """Proxy RPC to the given compute topic."""
+ # Check that the host exists.
+ self.db.service_get_by_compute_host(message.ctxt, host_name)
+ if message.need_response:
+ return rpc.call(message.ctxt, topic, rpc_message,
+ timeout=timeout)
+ rpc.cast(message.ctxt, topic, rpc_message)
+
+ def compute_node_get(self, message, compute_id):
+ """Get compute node by ID."""
+ compute_node = self.db.compute_node_get(message.ctxt,
+ compute_id)
+ return jsonutils.to_primitive(compute_node)
+
class _BroadcastMessageMethods(_BaseMessageMethods):
"""These are the methods that can be called as a part of a broadcast
@@ -800,6 +839,34 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
for instance in instances:
self._sync_instance(message.ctxt, instance)
+ def service_get_all(self, message, filters):
+ if filters is None:
+ filters = {}
+ disabled = filters.pop('disabled', None)
+ services = self.db.service_get_all(message.ctxt, disabled=disabled)
+ ret_services = []
+ for service in services:
+ service = jsonutils.to_primitive(service)
+ for key, val in filters.iteritems():
+ if service[key] != val:
+ break
+ else:
+ ret_services.append(service)
+ return ret_services
+
+ def compute_node_get_all(self, message, hypervisor_match):
+ """Return compute nodes in this cell."""
+ if hypervisor_match is not None:
+ nodes = self.db.compute_node_search_by_hypervisor(message.ctxt,
+ hypervisor_match)
+ else:
+ nodes = self.db.compute_node_get_all(message.ctxt)
+ return jsonutils.to_primitive(nodes)
+
+ def compute_node_stats(self, message):
+ """Return compute node stats from this cell."""
+ return self.db.compute_node_statistics(message.ctxt)
+
_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage,
'broadcast': _BroadcastMessage,
@@ -1038,6 +1105,84 @@ class MessageRunner(object):
run_locally=False)
message.process()
+ def service_get_all(self, ctxt, filters=None):
+ method_kwargs = dict(filters=filters)
+ message = _BroadcastMessage(self, ctxt, 'service_get_all',
+ method_kwargs, 'down',
+ run_locally=True, need_response=True)
+ return message.process()
+
+ def service_get_by_compute_host(self, ctxt, cell_name, host_name):
+ method_kwargs = dict(host_name=host_name)
+ message = _TargetedMessage(self, ctxt,
+ 'service_get_by_compute_host',
+ method_kwargs, 'down', cell_name,
+ need_response=True)
+ return message.process()
+
+ def proxy_rpc_to_manager(self, ctxt, cell_name, host_name, topic,
+ rpc_message, call, timeout):
+ method_kwargs = {'host_name': host_name,
+ 'topic': topic,
+ 'rpc_message': rpc_message,
+ 'timeout': timeout}
+ message = _TargetedMessage(self, ctxt,
+ 'proxy_rpc_to_manager',
+ method_kwargs, 'down', cell_name,
+ need_response=call)
+ return message.process()
+
+ def task_log_get_all(self, ctxt, cell_name, task_name,
+ period_beginning, period_ending,
+ host=None, state=None):
+ """Get task logs from the DB from all cells or a particular
+ cell.
+
+ If 'cell_name' is None or '', get responses from all cells.
+ If 'host' is not None, filter by host.
+ If 'state' is not None, filter by state.
+
+ Return a list of Response objects.
+ """
+ method_kwargs = dict(task_name=task_name,
+ period_beginning=period_beginning,
+ period_ending=period_ending,
+ host=host, state=state)
+ if cell_name:
+ message = _TargetedMessage(self, ctxt, 'task_log_get_all',
+ method_kwargs, 'down',
+ cell_name, need_response=True)
+ # Caller should get a list of Responses.
+ return [message.process()]
+ message = _BroadcastMessage(self, ctxt, 'task_log_get_all',
+ method_kwargs, 'down',
+ run_locally=True, need_response=True)
+ return message.process()
+
+ def compute_node_get_all(self, ctxt, hypervisor_match=None):
+ """Return list of compute nodes in all child cells."""
+ method_kwargs = dict(hypervisor_match=hypervisor_match)
+ message = _BroadcastMessage(self, ctxt, 'compute_node_get_all',
+ method_kwargs, 'down',
+ run_locally=True, need_response=True)
+ return message.process()
+
+ def compute_node_stats(self, ctxt):
+ """Return compute node stats from all child cells."""
+ method_kwargs = dict()
+ message = _BroadcastMessage(self, ctxt, 'compute_node_stats',
+ method_kwargs, 'down',
+ run_locally=True, need_response=True)
+ return message.process()
+
+ def compute_node_get(self, ctxt, cell_name, compute_id):
+ """Return compute node entry from a specific cell by ID."""
+ method_kwargs = dict(compute_id=compute_id)
+ message = _TargetedMessage(self, ctxt, 'compute_node_get',
+ method_kwargs, 'down',
+ cell_name, need_response=True)
+ return message.process()
+
@staticmethod
def get_message_types():
return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys()
diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py
index 0ab4fc352..e7db2582f 100644
--- a/nova/cells/rpcapi.py
+++ b/nova/cells/rpcapi.py
@@ -41,6 +41,11 @@ class CellsAPI(rpc_proxy.RpcProxy):
1.0 - Initial version.
1.1 - Adds get_cell_info_for_neighbors() and sync_instances()
+ 1.2 - Adds service_get_all(), service_get_by_compute_host(),
+ and proxy_rpc_to_manager()
+ 1.3 - Adds task_log_get_all()
+ 1.4 - Adds compute_node_get(), compute_node_get_all(), and
+ compute_node_stats()
'''
BASE_RPC_API_VERSION = '1.0'
@@ -155,3 +160,61 @@ class CellsAPI(rpc_proxy.RpcProxy):
updated_since=updated_since,
deleted=deleted),
version='1.1')
+
+ def service_get_all(self, ctxt, filters=None):
+ """Ask all cells for their list of services."""
+ return self.call(ctxt,
+ self.make_msg('service_get_all',
+ filters=filters),
+ version='1.2')
+
+ def service_get_by_compute_host(self, ctxt, host_name):
+ """Get the service entry for a host in a particular cell. The
+ cell name should be encoded within the host_name.
+ """
+ return self.call(ctxt, self.make_msg('service_get_by_compute_host',
+ host_name=host_name),
+ version='1.2')
+
+ def proxy_rpc_to_manager(self, ctxt, rpc_message, topic, call=False,
+ timeout=None):
+ """Proxy RPC to a compute manager. The host in the topic
+ should be encoded with the target cell name.
+ """
+ return self.call(ctxt, self.make_msg('proxy_rpc_to_manager',
+ topic=topic,
+ rpc_message=rpc_message,
+ call=call,
+ timeout=timeout),
+ timeout=timeout,
+ version='1.2')
+
+ def task_log_get_all(self, ctxt, task_name, period_beginning,
+ period_ending, host=None, state=None):
+ """Get the task logs from the DB in child cells."""
+ return self.call(ctxt, self.make_msg('task_log_get_all',
+ task_name=task_name,
+ period_beginning=period_beginning,
+ period_ending=period_ending,
+ host=host, state=state),
+ version='1.3')
+
+ def compute_node_get(self, ctxt, compute_id):
+ """Get a compute node by ID in a specific cell."""
+ return self.call(ctxt, self.make_msg('compute_node_get',
+ compute_id=compute_id),
+ version='1.4')
+
+ def compute_node_get_all(self, ctxt, hypervisor_match=None):
+ """Return list of compute nodes in all cells, optionally
+ filtering by hypervisor host.
+ """
+ return self.call(ctxt,
+ self.make_msg('compute_node_get_all',
+ hypervisor_match=hypervisor_match),
+ version='1.4')
+
+ def compute_node_stats(self, ctxt):
+ """Return compute node stats from all cells."""
+ return self.call(ctxt, self.make_msg('compute_node_stats'),
+ version='1.4')
diff --git a/nova/cells/utils.py b/nova/cells/utils.py
index d25f98fab..e9560969a 100644
--- a/nova/cells/utils.py
+++ b/nova/cells/utils.py
@@ -20,6 +20,12 @@ import random
from nova import db
+# Separator used between cell names for the 'full cell name' and routing
+# path
+_PATH_CELL_SEP = '!'
+# Separator used between cell name and item
+_CELL_ITEM_SEP = '@'
+
def get_instances_to_sync(context, updated_since=None, project_id=None,
deleted=True, shuffle=False, uuids_only=False):
@@ -46,3 +52,48 @@ def get_instances_to_sync(context, updated_since=None, project_id=None,
yield instance['uuid']
else:
yield instance
+
+
+def cell_with_item(cell_name, item):
+ """Turn cell_name and item into <cell_name>@<item>."""
+ return cell_name + _CELL_ITEM_SEP + str(item)
+
+
+def split_cell_and_item(cell_and_item):
+ """Split a combined cell@item and return them."""
+ return cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
+
+
+def _add_cell_to_service(service, cell_name):
+ service['id'] = cell_with_item(cell_name, service['id'])
+ service['host'] = cell_with_item(cell_name, service['host'])
+
+
+def add_cell_to_compute_node(compute_node, cell_name):
+ """Fix compute_node attributes that should be unique. Allows
+ API cell to query the 'id' by cell@id.
+ """
+ compute_node['id'] = cell_with_item(cell_name, compute_node['id'])
+ # Might have a 'service' backref. But if to_primitive() was used
+ # on this and it recursed too deep, 'service' may be "?".
+ service = compute_node.get('service')
+ if isinstance(service, dict):
+ _add_cell_to_service(service, cell_name)
+
+
+def add_cell_to_service(service, cell_name):
+ """Fix service attributes that should be unique. Allows
+ API cell to query the 'id' or 'host' by cell@id/host.
+ """
+ _add_cell_to_service(service, cell_name)
+ compute_node = service.get('compute_node')
+ if compute_node:
+ add_cell_to_compute_node(compute_node[0], cell_name)
+
+
+def add_cell_to_task_log(task_log, cell_name):
+ """Fix task_log attributes that should be unique. In particular,
+ the 'id' and 'host' fields should be prepended with cell name.
+ """
+ task_log['id'] = cell_with_item(cell_name, task_log['id'])
+ task_log['host'] = cell_with_item(cell_name, task_log['host'])
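
Expected round-trip behavior of the new helpers (values illustrative; because split_cell_and_item() uses rsplit, cell paths may themselves contain '!'):

    _CELL_ITEM_SEP = '@'

    def cell_with_item(cell_name, item):
        return cell_name + _CELL_ITEM_SEP + str(item)

    def split_cell_and_item(cell_and_item):
        return cell_and_item.rsplit(_CELL_ITEM_SEP, 1)

    assert cell_with_item('parent!child', 42) == 'parent!child@42'
    assert split_cell_and_item('parent!child@42') == ['parent!child', '42']
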
diff --git a/nova/common/memorycache.py b/nova/common/memorycache.py
index f77b3f51a..86057b6ae 100644
--- a/nova/common/memorycache.py
+++ b/nova/common/memorycache.py
@@ -70,7 +70,7 @@ class Client(object):
def add(self, key, value, time=0, min_compress_len=0):
"""Sets the value for a key if it doesn't exist."""
- if not self.get(key) is None:
+ if self.get(key) is not None:
return False
return self.set(key, value, time, min_compress_len)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 06ce2e07e..a5d8ab6e8 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -31,6 +31,7 @@ import uuid
from nova import availability_zones
from nova import block_device
+from nova.compute import instance_actions
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
@@ -91,7 +92,6 @@ compute_opts = [
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
-CONF.import_opt('consoleauth_topic', 'nova.consoleauth')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
MAX_USERDATA_SIZE = 65535
@@ -131,6 +131,15 @@ def check_instance_state(vm_state=None, task_state=(None,)):
return outer
+def check_instance_host(function):
+ @functools.wraps(function)
+ def wrapped(self, context, instance, *args, **kwargs):
+ if not instance['host']:
+ raise exception.InstanceNotReady(instance_id=instance['uuid'])
+ return function(self, context, instance, *args, **kwargs)
+ return wrapped
+
+
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
@@ -186,10 +195,15 @@ class API(base.Base):
(old_ref, instance_ref) = self.db.instance_update_and_get_original(
context, instance_uuid, kwargs)
- notifications.send_update(context, old_ref, instance_ref)
+ notifications.send_update(context, old_ref, instance_ref, 'api')
return instance_ref
+ def _record_action_start(self, context, instance, action):
+ act = compute_utils.pack_action_start(context, instance['uuid'],
+ action)
+ self.db.action_start(context, act)
+
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
@@ -508,6 +522,9 @@ class API(base.Base):
availability_zone, forced_host = self._handle_availability_zone(
availability_zone)
+ system_metadata = instance_types.save_instance_type_info(
+ dict(), instance_type)
+
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
@@ -537,7 +554,8 @@ class API(base.Base):
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
- 'progress': 0}
+ 'progress': 0,
+ 'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
image, auto_disk_config)
@@ -558,6 +576,11 @@ class API(base.Base):
security_group, block_device_mapping)
instances.append(instance)
instance_uuids.append(instance['uuid'])
+ self._validate_bdm(context, instance)
+ # send a state update notification for the initial create to
+ # show it going from non-existent to BUILDING
+ notifications.send_update_with_states(context, instance, None,
+ vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
@@ -611,6 +634,10 @@ class API(base.Base):
block_device_mapping, auto_disk_config,
reservation_id, scheduler_hints)
+ for instance in instances:
+ self._record_action_start(context, instance,
+ instance_actions.CREATE)
+
self.scheduler_rpcapi.run_instance(context,
request_spec=request_spec,
admin_password=admin_password, injected_files=injected_files,
@@ -704,6 +731,23 @@ class API(base.Base):
self.db.block_device_mapping_update_or_create(elevated_context,
values)
+ def _validate_bdm(self, context, instance):
+ for bdm in self.db.block_device_mapping_get_all_by_instance(
+ context, instance['uuid']):
+ # NOTE(vish): For now, just make sure the volumes are accessible.
+ snapshot_id = bdm.get('snapshot_id')
+ volume_id = bdm.get('volume_id')
+ if volume_id is not None:
+ try:
+ self.volume_api.get(context, volume_id)
+ except Exception:
+ raise exception.InvalidBDMVolume(id=volume_id)
+ elif snapshot_id is not None:
+ try:
+ self.volume_api.get_snapshot(context, snapshot_id)
+ except Exception:
+ raise exception.InvalidBDMSnapshot(id=snapshot_id)
+
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
@@ -818,11 +862,6 @@ class API(base.Base):
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
- # send a state update notification for the initial create to
- # show it going from non-existent to BUILDING
- notifications.send_update_with_states(context, instance, None,
- vm_states.BUILDING, None, None, service="api")
-
return instance
def _check_create_policies(self, context, availability_zone,
@@ -877,9 +916,9 @@ class API(base.Base):
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
- host_names = [x['host'] for (x, idx)
- in self.db.service_get_all_compute_sorted(context)]
- for host_name in host_names:
+ for service in self.db.service_get_all_by_topic(context,
+ CONF.compute_topic):
+ host_name = service['host']
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
def update_state(self, context, instance, new_state):
@@ -1005,6 +1044,10 @@ class API(base.Base):
context.elevated(), instance['host'])
if self.servicegroup_api.service_is_up(service):
is_up = True
+
+ self._record_action_start(context, instance,
+ instance_actions.DELETE)
+
cb(context, instance, bdms)
except exception.ComputeHostNotFound:
pass
@@ -1148,6 +1191,8 @@ class API(base.Base):
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, 1, 1)
+ self._record_action_start(context, instance, instance_actions.RESTORE)
+
try:
if instance['host']:
instance = self.update(context, instance,
@@ -1177,6 +1222,7 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
+ @check_instance_host
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR, vm_states.STOPPED],
task_state=[None])
@@ -1189,10 +1235,13 @@ class API(base.Base):
expected_task_state=None,
progress=0)
+ self._record_action_start(context, instance, instance_actions.STOP)
+
self.compute_rpcapi.stop_instance(context, instance, cast=do_cast)
@wrap_check_policy
@check_instance_lock
+ @check_instance_host
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
@@ -1202,6 +1251,7 @@ class API(base.Base):
task_state=task_states.POWERING_ON,
expected_task_state=None)
+ self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
@@ -1211,8 +1261,8 @@ class API(base.Base):
# search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
"""Get instances that were continuously active over a window."""
- return self.db.instance_get_active_by_window(context, begin, end,
- project_id)
+ return self.db.instance_get_active_by_window_joined(context, begin,
+ end, project_id)
#NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
@@ -1591,8 +1641,14 @@ class API(base.Base):
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
- vm_states.RESCUED],
- task_state=[None, task_states.REBOOTING])
+ vm_states.RESCUED, vm_states.PAUSED,
+ vm_states.SUSPENDED],
+ task_state=[None, task_states.REBOOTING,
+ task_states.REBOOTING_HARD,
+ task_states.RESUMING,
+ task_states.UNPAUSING,
+ task_states.PAUSING,
+ task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
@@ -1612,6 +1668,8 @@ class API(base.Base):
block_info = self._get_block_device_info(elevated,
instance['uuid'])
+ self._record_action_start(context, instance, instance_actions.REBOOT)
+
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=block_info,
reboot_type=reboot_type)
@@ -1702,6 +1760,8 @@ class API(base.Base):
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance['uuid'])
+ self._record_action_start(context, instance, instance_actions.REBUILD)
+
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
@@ -1732,6 +1792,9 @@ class API(base.Base):
QUOTAS.commit(context, reservations)
reservations = []
+ self._record_action_start(context, instance,
+ instance_actions.REVERT_RESIZE)
+
self.compute_rpcapi.revert_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['dest_compute'], reservations=reservations)
@@ -1761,6 +1824,9 @@ class API(base.Base):
QUOTAS.commit(context, reservations)
reservations = []
+ self._record_action_start(context, instance,
+ instance_actions.CONFIRM_RESIZE)
+
self.compute_rpcapi.confirm_resize(context,
instance=instance, migration=migration_ref,
host=migration_ref['source_compute'],
@@ -1936,6 +2002,9 @@ class API(base.Base):
"filter_properties": filter_properties,
"reservations": reservations,
}
+
+ self._record_action_start(context, instance, instance_actions.RESIZE)
+
self.scheduler_rpcapi.prep_resize(context, **args)
@wrap_check_policy
@@ -1962,6 +2031,9 @@ class API(base.Base):
vm_state=vm_states.ACTIVE,
task_state=task_states.PAUSING,
expected_task_state=None)
+
+ self._record_action_start(context, instance, instance_actions.PAUSE)
+
self.compute_rpcapi.pause_instance(context, instance=instance)
@wrap_check_policy
@@ -1974,6 +2046,9 @@ class API(base.Base):
vm_state=vm_states.PAUSED,
task_state=task_states.UNPAUSING,
expected_task_state=None)
+
+ self._record_action_start(context, instance, instance_actions.UNPAUSE)
+
self.compute_rpcapi.unpause_instance(context, instance=instance)
@wrap_check_policy
@@ -1995,6 +2070,9 @@ class API(base.Base):
vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING,
expected_task_state=None)
+
+ self._record_action_start(context, instance, instance_actions.SUSPEND)
+
self.compute_rpcapi.suspend_instance(context, instance=instance)
@wrap_check_policy
@@ -2007,6 +2085,9 @@ class API(base.Base):
vm_state=vm_states.SUSPENDED,
task_state=task_states.RESUMING,
expected_task_state=None)
+
+ self._record_action_start(context, instance, instance_actions.RESUME)
+
self.compute_rpcapi.resume_instance(context, instance=instance)
@wrap_check_policy
@@ -2020,6 +2101,8 @@ class API(base.Base):
task_state=task_states.RESCUING,
expected_task_state=None)
+ self._record_action_start(context, instance, instance_actions.RESCUE)
+
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
@@ -2033,6 +2116,9 @@ class API(base.Base):
vm_state=vm_states.RESCUED,
task_state=task_states.UNRESCUING,
expected_task_state=None)
+
+ self._record_action_start(context, instance, instance_actions.UNRESCUE)
+
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@@ -2045,6 +2131,9 @@ class API(base.Base):
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=None)
+ self._record_action_start(context, instance,
+ instance_actions.CHANGE_PASSWORD)
+
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@@ -2057,11 +2146,9 @@ class API(base.Base):
file_contents=file_contents)
@wrap_check_policy
+ @check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
- if not instance['host']:
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
-
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
@@ -2071,20 +2158,17 @@ class API(base.Base):
return {'url': connect_info['access_url']}
+ @check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
- if not instance['host']:
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
+ @check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
- if not instance['host']:
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
-
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
@@ -2094,15 +2178,15 @@ class API(base.Base):
return {'url': connect_info['access_url']}
+ @check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
- if not instance['host']:
- raise exception.InstanceNotReady(instance_id=instance['uuid'])
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
+ @check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
@@ -2290,6 +2374,39 @@ class API(base.Base):
self.scheduler_rpcapi.live_migration(context, block_migration,
disk_over_commit, instance, host_name)
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
+ task_state=[None])
+ def evacuate(self, context, instance, host, on_shared_storage,
+ admin_password=None):
+ """Running evacuate to target host.
+
+ Checking vm compute host state, if the host not in expected_state,
+ raising an exception.
+ """
+ LOG.debug(_('vm evacuation scheduled'))
+ # Use a separate name so the caller-supplied target 'host' is
+ # preserved for the rebuild call below.
+ inst_host = instance['host']
+ service = self.db.service_get_by_compute_host(context, inst_host)
+ if self.servicegroup_api.service_is_up(service):
+ msg = (_('Instance compute service state on %(inst_host)s '
+ 'expected to be down, but it was up.'
+ ) % locals())
+ LOG.error(msg)
+ raise exception.ComputeServiceUnavailable(msg)
+
+ instance = self.update(context, instance, expected_task_state=None,
+ task_state=task_states.REBUILDING)
+ return self.compute_rpcapi.rebuild_instance(context,
+ instance=instance,
+ new_pass=admin_password,
+ injected_files=None,
+ image_ref=None,
+ orig_image_ref=None,
+ orig_sys_metadata=None,
+ bdms=None,
+ recreate=True,
+ on_shared_storage=on_shared_storage,
+ host=host)
+
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
@@ -2306,16 +2423,12 @@ class HostAPI(base.Base):
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
- # NOTE(comstud): No instance_uuid argument to this compute manager
- # call
self._assert_host_exists(context, host_name)
return self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
- # NOTE(comstud): No instance_uuid argument to this compute manager
- # call
self._assert_host_exists(context, host_name)
return self.rpcapi.get_host_uptime(context, host=host_name)
@@ -2332,7 +2445,7 @@ class HostAPI(base.Base):
return self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
- def service_get_all(self, context, filters=None):
+ def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
@@ -2341,9 +2454,11 @@ class HostAPI(base.Base):
"""
if filters is None:
filters = {}
- services = self.db.service_get_all(context, False)
- services = availability_zones.set_availability_zones(context,
- services)
+ disabled = filters.pop('disabled', None)
+ services = self.db.service_get_all(context, disabled=disabled)
+ if set_zones or 'availability_zone' in filters:
+ services = availability_zones.set_availability_zones(context,
+ services)
ret_services = []
for service in services:
for key, val in filters.iteritems():
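
To illustrate the updated filtering: 'disabled' is popped from the filters and pushed down into the DB query, availability zones are only computed when needed, and any remaining keys are matched in Python. A standalone sketch of that last step, assuming plain service dicts:

    def apply_filters(services, filters):
        # Keep rows whose values match every remaining filter key,
        # mirroring the Python-side loop above.
        return [s for s in services
                if all(s.get(key) == val for key, val in filters.items())]

    rows = [{'host': 'a', 'availability_zone': 'az1'},
            {'host': 'b', 'availability_zone': 'az2'}]
    assert apply_filters(rows, {'availability_zone': 'az1'}) == rows[:1]
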
@@ -2362,6 +2477,31 @@ class HostAPI(base.Base):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
+ def task_log_get_all(self, context, task_name, period_beginning,
+ period_ending, host=None, state=None):
+ """Return the task logs within a given range, optionally
+ filtering by host and/or state.
+ """
+ return self.db.task_log_get_all(context, task_name,
+ period_beginning,
+ period_ending,
+ host=host,
+ state=state)
+
+ def compute_node_get(self, context, compute_id):
+ """Return compute node entry for particular integer ID."""
+ return self.db.compute_node_get(context, int(compute_id))
+
+ def compute_node_get_all(self, context):
+ return self.db.compute_node_get_all(context)
+
+ def compute_node_search_by_hypervisor(self, context, hypervisor_match):
+ return self.db.compute_node_search_by_hypervisor(context,
+ hypervisor_match)
+
+ def compute_node_statistics(self, context):
+ return self.db.compute_node_statistics(context)
+
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
@@ -2819,13 +2959,14 @@ class SecurityGroupAPI(base.Base):
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
- instances = set()
+ instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
- instances.add(instance)
+ if instance['uuid'] not in instances:
+ instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
- for instance in instances:
+ for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
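
The set-to-dict change above deduplicates instances by uuid rather than by object identity: the same instance can appear under several security groups as distinct objects that a set keyed on identity would not collapse. The pattern in isolation:

    groups = [
        {'instances': [{'uuid': 'u1', 'host': 'h1'}]},
        {'instances': [{'uuid': 'u1', 'host': 'h1'},
                       {'uuid': 'u2', 'host': None}]},
    ]
    instances = {}
    for group in groups:
        for inst in group['instances']:
            # Keyed by uuid, so the duplicate entry for 'u1' is dropped.
            instances.setdefault(inst['uuid'], inst)
    assert sorted(instances) == ['u1', 'u2']
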
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index d5427a04b..50449df04 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -15,10 +15,13 @@
"""Compute API that proxies via Cells Service."""
+from nova import availability_zones
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
+from nova.cells import utils as cells_utils
from nova.compute import api as compute_api
from nova.compute import instance_types
+from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
from nova import exception
from nova.openstack.common import excutils
@@ -61,6 +64,27 @@ class SchedulerRPCAPIRedirect(object):
self.cells_rpcapi.schedule_run_instance(context, **kwargs)
+class ComputeRPCProxyAPI(compute_rpcapi.ComputeAPI):
+ """Class used to substitute Compute RPC API that will proxy
+ via the cells manager to a compute manager in a child cell.
+ """
+ def __init__(self, *args, **kwargs):
+ super(ComputeRPCProxyAPI, self).__init__(*args, **kwargs)
+ self.cells_rpcapi = cells_rpcapi.CellsAPI()
+
+ def cast(self, ctxt, msg, topic=None, version=None):
+ self._set_version(msg, version)
+ topic = self._get_topic(topic)
+ self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic)
+
+ def call(self, ctxt, msg, topic=None, version=None, timeout=None):
+ self._set_version(msg, version)
+ topic = self._get_topic(topic)
+ return self.cells_rpcapi.proxy_rpc_to_manager(ctxt, msg, topic,
+ call=True,
+ timeout=timeout)
+
+
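
The proxy above works because the base ComputeAPI funnels every RPC through cast() and call(); overriding just those two methods is enough to reroute all compute RPCs through the cells service. A framework-free sketch of the same interception idea (all names hypothetical):

    class DirectRPC(object):
        def cast(self, ctxt, msg, topic=None):
            print('direct cast to %s: %s' % (topic, msg))

        def reboot(self, ctxt, uuid):
            self.cast(ctxt, {'method': 'reboot', 'args': {'uuid': uuid}},
                      topic='compute.host1')


    class ProxiedRPC(DirectRPC):
        def cast(self, ctxt, msg, topic=None):
            # Every inherited RPC method now routes through here.
            print('proxied via cells to %s: %s' % (topic, msg))


    ProxiedRPC().reboot(None, 'u1')   # emits the proxied message
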
class ComputeCellsAPI(compute_api.API):
def __init__(self, *args, **kwargs):
super(ComputeCellsAPI, self).__init__(*args, **kwargs)
@@ -278,6 +302,13 @@ class ComputeCellsAPI(compute_api.API):
**kwargs)
self._cast_to_cells(context, instance, 'rebuild', *args, **kwargs)
+ @validate_cell
+ def evacuate(self, context, instance, *args, **kwargs):
+ """Evacuate the given instance with the provided attributes."""
+ super(ComputeCellsAPI, self).evacuate(context, instance, *args,
+ **kwargs)
+ self._cast_to_cells(context, instance, 'evacuate', *args, **kwargs)
+
@check_instance_state(vm_state=[vm_states.RESIZED])
@validate_cell
def revert_resize(self, context, instance):
@@ -308,17 +339,15 @@ class ComputeCellsAPI(compute_api.API):
# specified flavor_id is valid and exists. We'll need to load
# it again, but that should be safe.
- old_instance_type_id = instance['instance_type_id']
- old_instance_type = instance_types.get_instance_type(
- old_instance_type_id)
+ old_instance_type = instance_types.extract_instance_type(instance)
flavor_id = kwargs.get('flavor_id')
if not flavor_id:
new_instance_type = old_instance_type
else:
- new_instance_type = instance_types.get_instance_type_by_flavor_id(
- flavor_id)
+ new_instance_type = instance_types.extract_instance_type(instance,
+ 'new_')
# NOTE(johannes): Later, when the resize is confirmed or reverted,
# the superclass implementations of those methods will need access
@@ -545,3 +574,94 @@ class ComputeCellsAPI(compute_api.API):
except exception.InstanceUnknownCell:
pass
return rv
+
+
+class HostAPI(compute_api.HostAPI):
+ """HostAPI() class for cells.
+
+ Implements host management related operations. Works by setting the
+ RPC API used by the base class to proxy via the cells manager to the
+ compute manager in the correct cell. Hosts specified with cells will
+ need to be of the format 'path!to!cell@host'.
+
+ DB methods in the base class are also overridden to proxy via the
+ cells manager.
+ """
+ def __init__(self):
+ super(HostAPI, self).__init__(rpcapi=ComputeRPCProxyAPI())
+ self.cells_rpcapi = cells_rpcapi.CellsAPI()
+
+ def _assert_host_exists(self, context, host_name):
+ """Cannot check this in API cell. This will be checked in the
+ target child cell.
+ """
+ pass
+
+ def service_get_all(self, context, filters=None, set_zones=False):
+ if filters is None:
+ filters = {}
+ if 'availability_zone' in filters:
+ zone_filter = filters.pop('availability_zone')
+ set_zones = True
+ else:
+ zone_filter = None
+ services = self.cells_rpcapi.service_get_all(context,
+ filters=filters)
+ if set_zones:
+ services = availability_zones.set_availability_zones(context,
+ services)
+ if zone_filter is not None:
+ services = [s for s in services
+ if s['availability_zone'] == zone_filter]
+ return services
+
+ def service_get_by_compute_host(self, context, host_name):
+ return self.cells_rpcapi.service_get_by_compute_host(context,
+ host_name)
+
+ def instance_get_all_by_host(self, context, host_name):
+ """Get all instances by host. Host might have a cell prepended
+ to it, so we'll need to strip it out. We don't need to proxy
+ this call to cells, as we have instance information here in
+ the API cell.
+ """
+ try:
+ cell_name, host_name = cells_utils.split_cell_and_item(host_name)
+ except ValueError:
+ cell_name = None
+ instances = super(HostAPI, self).instance_get_all_by_host(context,
+ host_name)
+ if cell_name:
+ instances = [i for i in instances
+ if i['cell_name'] == cell_name]
+ return instances
+
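
A sketch of the cell/host splitting assumed above; cells_utils.split_cell_and_item is expected to split on the '@' delimiter and raise ValueError when no cell prefix is present (this reconstruction is an assumption, not the actual implementation):

    def split_cell_and_item(cell_and_item):
        # 'path!to!cell@host' -> ('path!to!cell', 'host')
        cell, sep, item = cell_and_item.partition('@')
        if not sep:
            raise ValueError('no cell delimiter in %r' % cell_and_item)
        return cell, item

    assert split_cell_and_item('region!child@node-1') == ('region!child',
                                                          'node-1')
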
+ def task_log_get_all(self, context, task_name, beginning, ending,
+ host=None, state=None):
+ """Return the task logs within a given range from cells,
+ optionally filtering by the host and/or state. For cells, the
+ host should be a path like 'path!to!cell@host'. If no @host
+ is given, only task logs from a particular cell will be returned.
+ """
+ return self.cells_rpcapi.task_log_get_all(context,
+ task_name,
+ beginning,
+ ending,
+ host=host,
+ state=state)
+
+ def compute_node_get(self, context, compute_id):
+ """Get a compute node from a particular cell by its integer ID.
+ compute_id should be in the format of 'path!to!cell@ID'.
+ """
+ return self.cells_rpcapi.compute_node_get(context, compute_id)
+
+ def compute_node_get_all(self, context):
+ return self.cells_rpcapi.compute_node_get_all(context)
+
+ def compute_node_search_by_hypervisor(self, context, hypervisor_match):
+ return self.cells_rpcapi.compute_node_get_all(context,
+ hypervisor_match=hypervisor_match)
+
+ def compute_node_statistics(self, context):
+ return self.cells_rpcapi.compute_node_stats(context)
diff --git a/nova/compute/instance_actions.py b/nova/compute/instance_actions.py
new file mode 100644
index 000000000..cbb517387
--- /dev/null
+++ b/nova/compute/instance_actions.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible actions on an instance.
+
+Actions should generally match a user intention at the API level. Because
+they can be user visible, matching the API's wording helps to avoid
+confusion; for that reason they tend to maintain the casing sent to the API.
+
+Maintaining a list of actions here should protect against inconsistencies when
+they are used.
+"""
+
+CREATE = 'create'
+DELETE = 'delete'
+RESTORE = 'restore'
+STOP = 'stop'
+START = 'start'
+REBOOT = 'reboot'
+REBUILD = 'rebuild'
+REVERT_RESIZE = 'revertResize'
+CONFIRM_RESIZE = 'confirmResize'
+RESIZE = 'resize'
+PAUSE = 'pause'
+UNPAUSE = 'unpause'
+SUSPEND = 'suspend'
+RESUME = 'resume'
+RESCUE = 'rescue'
+UNRESCUE = 'unrescue'
+CHANGE_PASSWORD = 'changePassword'
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 78129ee6b..045a24d4d 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -27,6 +27,7 @@ from nova import context
from nova import db
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import log as logging
from nova import utils
@@ -44,6 +45,25 @@ LOG = logging.getLogger(__name__)
INVALID_NAME_REGEX = re.compile("[^\w\.\- ]")
+def _int_or_none(val):
+ if val is not None:
+ return int(val)
+
+
+system_metadata_instance_type_props = {
+ 'id': int,
+ 'name': str,
+ 'memory_mb': int,
+ 'vcpus': int,
+ 'root_gb': int,
+ 'ephemeral_gb': int,
+ 'flavorid': str,
+ 'swap': int,
+ 'rxtx_factor': float,
+ 'vcpu_weight': _int_or_none,
+ }
+
+
def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
swap=None, rxtx_factor=None, is_public=True):
"""Creates instance types."""
@@ -110,7 +130,7 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
try:
return db.instance_type_create(context.get_admin_context(), kwargs)
- except exception.DBError, e:
+ except db_session.DBError, e:
LOG.exception(_('DB error: %s') % e)
raise exception.InstanceTypeCreateFailed()
@@ -210,3 +230,42 @@ def remove_instance_type_access(flavorid, projectid, ctxt=None):
ctxt = context.get_admin_context()
return db.instance_type_access_remove(ctxt, flavorid, projectid)
+
+
+def extract_instance_type(instance, prefix=''):
+ """Create an InstanceType-like object from instance's system_metadata
+ information."""
+
+ instance_type = {}
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ for key, type_fn in system_metadata_instance_type_props.items():
+ type_key = '%sinstance_type_%s' % (prefix, key)
+ instance_type[key] = type_fn(sys_meta[type_key])
+ return instance_type
+
+
+def save_instance_type_info(metadata, instance_type, prefix=''):
+ """Save properties from instance_type into instance's system_metadata,
+ in the format of:
+
+ [prefix]instance_type_[key]
+
+ This can be used to update system_metadata in place from a type, as well
+ as stash information about another instance_type for later use (such as
+ during resize)."""
+
+ for key in system_metadata_instance_type_props.keys():
+ to_key = '%sinstance_type_%s' % (prefix, key)
+ metadata[to_key] = instance_type[key]
+ return metadata
+
+
+def delete_instance_type_info(metadata, *prefixes):
+ """Delete instance_type information from instance's system_metadata
+ by prefix."""
+
+ for key in system_metadata_instance_type_props.keys():
+ for prefix in prefixes:
+ to_key = '%sinstance_type_%s' % (prefix, key)
+ del metadata[to_key]
+ return metadata
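
A round-trip sketch of the three helpers above, assuming utils.metadata_to_dict flattens system_metadata rows of the form {'key': ..., 'value': ...} into a plain dict:

    itype = {'id': 1, 'name': 'm1.tiny', 'memory_mb': 512, 'vcpus': 1,
             'root_gb': 0, 'ephemeral_gb': 0, 'flavorid': '1', 'swap': 0,
             'rxtx_factor': 1.0, 'vcpu_weight': None}

    metadata = save_instance_type_info({}, itype)                # current
    metadata = save_instance_type_info(metadata, itype, 'new_')  # stashed

    instance = {'system_metadata': [{'key': k, 'value': v}
                                    for k, v in metadata.items()]}
    assert extract_instance_type(instance)['memory_mb'] == 512
    assert extract_instance_type(instance, 'new_')['vcpus'] == 1
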
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index dbfee2469..4bb08aa16 100644..100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -27,6 +27,7 @@ terminating it.
"""
+import base64
import contextlib
import functools
import socket
@@ -57,13 +58,11 @@ from nova import network
from nova.network import model as network_model
from nova.openstack.common import cfg
from nova.openstack.common import excutils
-from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
-from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import paths
from nova import quota
@@ -229,7 +228,31 @@ def wrap_instance_fault(function):
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
- kwargs['instance'], e, sys.exc_info())
+ self.conductor_api, kwargs['instance'],
+ e, sys.exc_info())
+
+ return decorated_function
+
+
+def wrap_instance_event(function):
+ """Wraps a method to log the event taken on the instance, and result.
+
+ This decorator wraps a method to log the start and result of an event, as
+ part of an action taken on an instance.
+ """
+
+ @functools.wraps(function)
+ def decorated_function(self, context, *args, **kwargs):
+ wrapped_func = utils.get_wrapped_function(function)
+ keyed_args = utils.getcallargs(wrapped_func, context, *args,
+ **kwargs)
+ instance_uuid = keyed_args['instance']['uuid']
+
+ event_name = 'compute_{0}'.format(function.func_name)
+ with compute_utils.EventReporter(context, self.conductor_api,
+ event_name, instance_uuid):
+
+            return function(self, context, *args, **kwargs)
return decorated_function
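
The event wrapper depends on recovering the 'instance' argument whether it was passed positionally or by keyword. A sketch of that extraction using the standard library analogue of utils.getcallargs:

    import inspect


    def fake_reboot(self, context, instance, reboot_type='SOFT'):
        pass

    # Positional and keyword call styles both resolve to the same mapping.
    resolved = inspect.getcallargs(fake_reboot, None, 'ctx',
                                   instance={'uuid': 'u1'})
    assert resolved['instance']['uuid'] == 'u1'
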
@@ -566,15 +589,8 @@ class ComputeManager(manager.SchedulerDependentManager):
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
- # Get the network info from network API, but don't let it
- # update the cache, as that will hit the DB. We'll update
- # the cache ourselves via the conductor.
network_info = self.network_api.get_instance_nw_info(context,
- instance, update_cache=False)
- cache = {'network_info': network_info.json()}
- self.conductor_api.instance_info_cache_update(context,
- instance,
- cache)
+ instance, conductor_api=self.conductor_api)
return network_info
def _legacy_nw_info(self, network_info):
@@ -642,13 +658,15 @@ class ComputeManager(manager.SchedulerDependentManager):
'delete_on_termination': bdm['delete_on_termination']}
block_device_mapping.append(bdmap)
- return {
+ block_device_info = {
'root_device_name': instance['root_device_name'],
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping
}
+ return block_device_info
+
def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance):
@@ -656,7 +674,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
try:
- self._check_instance_not_already_created(context, instance)
+ self._check_instance_exists(context, instance)
image_meta = self._check_image_size(context, instance)
if node is None:
@@ -670,25 +688,35 @@ class ComputeManager(manager.SchedulerDependentManager):
extra_usage_info = {}
self._start_building(context, instance)
+
self._notify_about_instance_usage(
context, instance, "create.start",
extra_usage_info=extra_usage_info)
+
network_info = None
bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
context, instance)
+
rt = self._get_resource_tracker(node)
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
macs = self.driver.macs_for_instance(instance)
+
network_info = self._allocate_network(context, instance,
requested_networks, macs)
- block_device_info = self._prep_block_device(context,
- instance, bdms)
+
+ self._instance_update(
+ context, instance['uuid'],
+ vm_state=vm_states.BUILDING,
+ task_state=task_states.BLOCK_DEVICE_MAPPING)
+
+ block_device_info = self._prep_block_device(
+ context, instance, bdms)
+
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
injected_files, admin_password)
-
except exception.InstanceNotFound:
# the instance got deleted during the spawn
try:
@@ -732,8 +760,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_uuid = instance['uuid']
rescheduled = False
- compute_utils.add_instance_fault_from_exc(context, instance,
- exc_info[1], exc_info=exc_info)
+ compute_utils.add_instance_fault_from_exc(context, self.conductor_api,
+ instance, exc_info[1], exc_info=exc_info)
try:
self._deallocate_network(context, instance)
@@ -840,11 +868,10 @@ class ComputeManager(manager.SchedulerDependentManager):
**update_info)
return instance
- def _check_instance_not_already_created(self, context, instance):
+ def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance['name']):
- _msg = _("Instance has already been created")
- raise exception.Invalid(_msg)
+ raise exception.InstanceExists(name=instance['name'])
def _check_image_size(self, context, instance):
"""Ensure image is smaller than the maximum size allowed by the
@@ -876,8 +903,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# TODO(jk0): Should size be required in the image service?
return image_meta
- instance_type_id = instance['instance_type_id']
- instance_type = instance_types.get_instance_type(instance_type_id)
+ instance_type = instance_types.extract_instance_type(instance)
allowed_size_gb = instance_type['root_gb']
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
@@ -925,7 +951,7 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
- macs=macs)
+ macs=macs, conductor_api=self.conductor_api)
except Exception:
LOG.exception(_('Instance failed network setup'),
instance=instance)
@@ -938,9 +964,6 @@ class ComputeManager(manager.SchedulerDependentManager):
def _prep_block_device(self, context, instance, bdms):
"""Set up the block device for an instance with error logging."""
- instance = self._instance_update(context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.BLOCK_DEVICE_MAPPING)
try:
return self._setup_block_device_mapping(context, instance, bdms)
except Exception:
@@ -1036,6 +1059,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def run_instance(self, context, instance, request_spec=None,
filter_properties=None, requested_networks=None,
@@ -1046,6 +1070,9 @@ class ComputeManager(manager.SchedulerDependentManager):
filter_properties = {}
if injected_files is None:
injected_files = []
+ else:
+ injected_files = [(path, base64.b64decode(contents))
+ for path, contents in injected_files]
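
A sketch of the wire convention implied by the decode above: the sender is expected to base64-encode injected file contents, and the manager decodes them before the spawn (the encoding side shown here is an assumption about the API layer):

    import base64

    injected_files = [('/etc/motd', base64.b64encode(b'hello'))]
    decoded = [(path, base64.b64decode(contents))
               for path, contents in injected_files]
    assert decoded == [('/etc/motd', b'hello')]
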
@lockutils.synchronized(instance['uuid'], 'nova-')
def do_run_instance():
@@ -1142,6 +1169,7 @@ class ComputeManager(manager.SchedulerDependentManager):
system_metadata=system_meta)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @wrap_instance_event
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms=None):
"""Terminate an instance on this host."""
@@ -1174,6 +1202,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# can't use that name in grizzly.
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def stop_instance(self, context, instance):
"""Stopping an instance on this host."""
@@ -1193,6 +1222,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# can't use that name in grizzly.
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host."""
@@ -1209,6 +1239,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def soft_delete_instance(self, context, instance):
"""Soft delete an instance on this host."""
@@ -1230,6 +1261,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
@@ -1272,6 +1304,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata=None,
@@ -1288,6 +1321,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
+ :param bdms: block-device-mappings to use for rebuild
:param recreate: True if instance should be recreated with same disk
:param on_shared_storage: True if instance files on shared storage
"""
@@ -1299,39 +1333,28 @@ class ComputeManager(manager.SchedulerDependentManager):
instance=instance)
if recreate:
-
if not self.driver.capabilities["supports_recreate"]:
- # if driver doesn't support recreate return with failure
- _msg = _('instance recreate is not implemented '
- 'by this driver.')
-
- LOG.warn(_msg, instance=instance)
- self._instance_update(context,
- instance['uuid'],
- task_state=None,
- expected_task_state=task_states.
- REBUILDING)
- raise exception.Invalid(_msg)
+ raise exception.InstanceRecreateNotSupported
- self._check_instance_not_already_created(context, instance)
+ self._check_instance_exists(context, instance)
- # to cover case when admin expects that instance files are on
+            # To cover the case when an admin expects that instance files are on
# shared storage, but not accessible and vice versa
if on_shared_storage != self.driver.instance_on_disk(instance):
- _msg = _("Invalid state of instance files on "
- "shared storage")
- raise exception.Invalid(_msg)
+ raise exception.InvalidSharedStorage(
+ _("Invalid state of instance files on shared"
+ " storage"))
if on_shared_storage:
- LOG.info(_('disk on shared storage,'
- 'recreating using existing disk'))
+ LOG.info(_('disk on shared storage, recreating using'
+ ' existing disk'))
else:
image_ref = orig_image_ref = instance['image_ref']
- LOG.info(_("disk not on shared storage"
- "rebuilding from: '%s'") % str(image_ref))
+ LOG.info(_("disk not on shared storagerebuilding from:"
+ " '%s'") % str(image_ref))
- instance = self._instance_update(context, instance['uuid'],
- host=self.host)
+ instance = self._instance_update(
+ context, instance['uuid'], host=self.host)
if image_ref:
image_meta = _get_image_meta(context, image_ref)
@@ -1343,7 +1366,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# to point to the new one... we have to override it.
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
- compute_utils.notify_usage_exists(context, instance,
+ self.conductor_api.notify_usage_exists(context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
@@ -1352,64 +1375,64 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
- current_power_state = self._get_power_state(context, instance)
- instance = self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
+ instance = self._instance_update(
+ context, instance['uuid'],
+ power_state=self._get_power_state(context, instance),
task_state=task_states.REBUILDING,
expected_task_state=task_states.REBUILDING)
if recreate:
- # Detaching volumes.
- for bdm in self._get_instance_volume_bdms(context, instance):
- volume = self.volume_api.get(context, bdm['volume_id'])
+ self.network_api.setup_networks_on_host(
+ context, instance, self.host)
- # We can't run volume disconnect on source because
- # the host is down. Just marking volume as detached
- # in db, anyway the zombie instance going to be deleted
- # from source during init_host when host comes back
- self.volume_api.detach(context.elevated(), volume)
+ network_info = self._get_instance_nw_info(context, instance)
- self.network_api.setup_networks_on_host(context,
- instance, self.host)
- else:
- network_info = self._get_instance_nw_info(context, instance)
+ if bdms is None:
+ bdms = self.conductor_api.\
+ block_device_mapping_get_all_by_instance(
+ context, instance)
+
+ # NOTE(sirp): this detach is necessary b/c we will reattach the
+            # volumes in _prep_block_device below.
+ for bdm in self._get_volume_bdms(bdms):
+ volume = self.volume_api.get(context, bdm['volume_id'])
+ self.volume_api.detach(context, volume)
+
+ if not recreate:
+ block_device_info = self._get_volume_block_device_info(
+ self._get_volume_bdms(bdms))
self.driver.destroy(instance,
- self._legacy_nw_info(network_info))
+ self._legacy_nw_info(network_info),
+ block_device_info=block_device_info)
- instance = self._instance_update(context, instance['uuid'],
+ instance = self._instance_update(
+ context, instance['uuid'],
task_state=task_states.REBUILD_BLOCK_DEVICE_MAPPING,
expected_task_state=task_states.REBUILDING)
+ block_device_info = self._prep_block_device(
+ context, instance, bdms)
+
instance['injected_files'] = injected_files
- network_info = self._get_instance_nw_info(context, instance)
- if bdms is None:
- capi = self.conductor_api
- bdms = capi.block_device_mapping_get_all_by_instance(
- context, instance)
- device_info = self._setup_block_device_mapping(context, instance,
- bdms)
- expected_task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
- instance = self._instance_update(context, instance['uuid'],
+ instance = self._instance_update(
+ context, instance['uuid'],
task_state=task_states.REBUILD_SPAWNING,
- expected_task_state=expected_task_state)
-
- admin_password = new_pass
+ expected_task_state=
+ task_states.REBUILD_BLOCK_DEVICE_MAPPING)
self.driver.spawn(context, instance, image_meta,
- [], admin_password,
- self._legacy_nw_info(network_info),
- device_info)
+ [], new_pass,
+ network_info=self._legacy_nw_info(network_info),
+ block_device_info=block_device_info)
- current_power_state = self._get_power_state(context, instance)
- instance = self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_task_state=task_states.
- REBUILD_SPAWNING,
- launched_at=timeutils.utcnow())
+ instance = self._instance_update(
+ context, instance['uuid'],
+ power_state=self._get_power_state(context, instance),
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ expected_task_state=task_states.REBUILD_SPAWNING,
+ launched_at=timeutils.utcnow())
LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
if orig_vm_state == vm_states.STOPPED:
@@ -1427,6 +1450,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def reboot_instance(self, context, instance,
block_device_info=None,
@@ -1459,13 +1483,14 @@ class ComputeManager(manager.SchedulerDependentManager):
context=context, instance=instance)
try:
- self.driver.reboot(instance, self._legacy_nw_info(network_info),
+ self.driver.reboot(context, instance,
+ self._legacy_nw_info(network_info),
reboot_type, block_device_info)
except Exception, exc:
LOG.error(_('Cannot reboot instance: %(exc)s'), locals(),
context=context, instance=instance)
compute_utils.add_instance_fault_from_exc(context,
- instance, exc, sys.exc_info())
+ self.conductor_api, instance, exc, sys.exc_info())
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
@@ -1583,6 +1608,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass=None):
"""Set the root/admin password for an instance on this host.
@@ -1592,68 +1618,57 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
context = context.elevated()
-
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password()
- max_tries = 10
-
- for i in xrange(max_tries):
- current_power_state = self._get_power_state(context, instance)
- expected_state = power_state.RUNNING
+ current_power_state = self._get_power_state(context, instance)
+ expected_state = power_state.RUNNING
- if current_power_state != expected_state:
- self._instance_update(context, instance['uuid'],
+ if current_power_state != expected_state:
+ self._instance_update(context, instance['uuid'],
+ task_state=None,
+ expected_task_state=task_states.
+ UPDATING_PASSWORD)
+ _msg = _('Failed to set admin password. Instance %s is not'
+ ' running') % instance["uuid"]
+ raise exception.InstancePasswordSetFailed(
+ instance=instance['uuid'], reason=_msg)
+ else:
+ try:
+ self.driver.set_admin_password(instance, new_pass)
+ LOG.audit(_("Root password set"), instance=instance)
+ self._instance_update(context,
+ instance['uuid'],
+ task_state=None,
+ expected_task_state=task_states.
+ UPDATING_PASSWORD)
+ except NotImplementedError:
+ _msg = _('set_admin_password is not implemented '
+ 'by this driver or guest instance.')
+ LOG.warn(_msg, instance=instance)
+ self._instance_update(context,
+ instance['uuid'],
task_state=None,
expected_task_state=task_states.
- UPDATING_PASSWORD)
- _msg = _('Failed to set admin password. Instance %s is not'
- ' running') % instance["uuid"]
+ UPDATING_PASSWORD)
+ raise NotImplementedError(_msg)
+ except exception.UnexpectedTaskStateError:
+ # interrupted by another (most likely delete) task
+ # do not retry
+ raise
+ except Exception, e:
+ # Catch all here because this could be anything.
+ LOG.exception(_('set_admin_password failed: %s') % e,
+ instance=instance)
+ self._set_instance_error_state(context,
+ instance['uuid'])
+ # We create a new exception here so that we won't
+ # potentially reveal password information to the
+ # API caller. The real exception is logged above
+ _msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
- instance=instance['uuid'], reason=_msg)
- else:
- try:
- self.driver.set_admin_password(instance, new_pass)
- LOG.audit(_("Root password set"), instance=instance)
- self._instance_update(context,
- instance['uuid'],
- task_state=None,
- expected_task_state=task_states.
- UPDATING_PASSWORD)
- break
- except NotImplementedError:
- # NOTE(dprince): if the driver doesn't implement
- # set_admin_password we break to avoid a loop
- _msg = _('set_admin_password is not implemented '
- 'by this driver.')
- LOG.warn(_msg, instance=instance)
- self._instance_update(context,
- instance['uuid'],
- task_state=None,
- expected_task_state=task_states.
- UPDATING_PASSWORD)
- raise exception.InstancePasswordSetFailed(
- instance=instance['uuid'], reason=_msg)
- except exception.UnexpectedTaskStateError:
- # interrupted by another (most likely delete) task
- # do not retry
- raise
- except Exception, e:
- # Catch all here because this could be anything.
- LOG.exception(_('set_admin_password failed: %s') % e,
- instance=instance)
- if i == max_tries - 1:
- self._set_instance_error_state(context,
- instance['uuid'])
- # We create a new exception here so that we won't
- # potentially reveal password information to the
- # API caller. The real exception is logged above
- _msg = _('error setting admin password')
- raise exception.InstancePasswordSetFailed(
- instance=instance['uuid'], reason=_msg)
- time.sleep(1)
- continue
+ instance=instance['uuid'], reason=_msg)
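
The rewrite above drops the ten-attempt retry loop in favor of a single try, but keeps the deliberate exception laundering: the real failure is logged, while the caller receives only a generic message so no password material can leak through the API. The pattern in isolation (hypothetical names):

    import logging

    LOG = logging.getLogger(__name__)


    def set_password(driver, instance, new_pass):
        try:
            driver.set_admin_password(instance, new_pass)
        except Exception as exc:
            # Log the real error locally; raise a scrubbed one so the
            # exception text can never echo the password to the caller.
            LOG.exception('set_admin_password failed: %s', exc)
            raise RuntimeError('error setting admin password')
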
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@@ -1694,6 +1709,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def rescue_instance(self, context, instance, rescue_password=None):
"""
@@ -1731,6 +1747,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def unrescue_instance(self, context, instance):
"""Rescue an instance on this host."""
@@ -1761,6 +1778,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.change_instance_metadata(context, instance, diff)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @wrap_instance_event
@wrap_instance_fault
def confirm_resize(self, context, instance, reservations=None,
migration=None, migration_id=None):
@@ -1773,6 +1791,12 @@ class ComputeManager(manager.SchedulerDependentManager):
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
+ # NOTE(danms): delete stashed old/new instance_type information
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ instance_types.delete_instance_type_info(sys_meta, 'old_', 'new_')
+ self._instance_update(context, instance['uuid'],
+ system_metadata=sys_meta)
+
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration['source_compute'], teardown=True)
@@ -1792,6 +1816,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def revert_resize(self, context, instance, migration=None,
migration_id=None, reservations=None):
@@ -1806,7 +1831,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
- compute_utils.notify_usage_exists(
+ self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
with self._error_out_instance_on_exception(context, instance['uuid'],
@@ -1815,8 +1840,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
- self.network_api.migrate_instance_start(context, instance,
- migration)
+ self.conductor_api.network_migrate_instance_start(context,
+ instance,
+ migration)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
@@ -1836,6 +1862,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def finish_revert_resize(self, context, instance, reservations=None,
migration=None, migration_id=None):
@@ -1855,8 +1882,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
- old_instance_type = migration['old_instance_type_id']
- instance_type = instance_types.get_instance_type(old_instance_type)
+ instance_type = instance_types.extract_instance_type(instance,
+ prefix='old_')
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ instance_types.save_instance_type_info(sys_meta, instance_type)
+ instance_types.delete_instance_type_info(sys_meta, 'new_', 'old_')
instance = self._instance_update(context,
instance['uuid'],
@@ -1866,7 +1896,8 @@ class ComputeManager(manager.SchedulerDependentManager):
ephemeral_gb=instance_type['ephemeral_gb'],
instance_type_id=instance_type['id'],
host=migration['source_compute'],
- node=migration['source_node'])
+ node=migration['source_node'],
+ system_metadata=sys_meta)
self.network_api.setup_networks_on_host(context, instance,
migration['source_compute'])
@@ -1890,8 +1921,9 @@ class ComputeManager(manager.SchedulerDependentManager):
instance['uuid'], launched_at=timeutils.utcnow(),
expected_task_state=task_states.RESIZE_REVERTING)
- self.network_api.migrate_instance_finish(context, instance,
- migration)
+ self.conductor_api.network_migrate_instance_finish(context,
+ instance,
+ migration)
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.ACTIVE, task_state=None)
@@ -1904,15 +1936,13 @@ class ComputeManager(manager.SchedulerDependentManager):
self._quota_commit(context, reservations)
- @staticmethod
- def _quota_commit(context, reservations):
+ def _quota_commit(self, context, reservations):
if reservations:
- QUOTAS.commit(context, reservations)
+ self.conductor_api.quota_commit(context, reservations)
- @staticmethod
- def _quota_rollback(context, reservations):
+ def _quota_rollback(self, context, reservations):
if reservations:
- QUOTAS.rollback(context, reservations)
+ self.conductor_api.quota_rollback(context, reservations)
def _prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node):
@@ -1931,6 +1961,14 @@ class ComputeManager(manager.SchedulerDependentManager):
msg = _('destination same as source!')
raise exception.MigrationError(msg)
+ # NOTE(danms): Stash the new instance_type to avoid having to
+ # look it up in the database later
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ instance_types.save_instance_type_info(sys_meta, instance_type,
+ prefix='new_')
+ instance = self._instance_update(context, instance['uuid'],
+ system_metadata=sys_meta)
+
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker(node)
with rt.resize_claim(context, instance, instance_type, limits=limits) \
@@ -1944,6 +1982,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
reservations=None, request_spec=None,
@@ -1960,7 +1999,7 @@ class ComputeManager(manager.SchedulerDependentManager):
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
- compute_utils.notify_usage_exists(
+ self.conductor_api.notify_usage_exists(
context, instance, current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
@@ -1995,8 +2034,8 @@ class ComputeManager(manager.SchedulerDependentManager):
rescheduled = False
instance_uuid = instance['uuid']
- compute_utils.add_instance_fault_from_exc(context, instance,
- exc_info[0], exc_info=exc_info)
+ compute_utils.add_instance_fault_from_exc(context, self.conductor_api,
+ instance, exc_info[0], exc_info=exc_info)
try:
scheduler_method = self.scheduler_rpcapi.prep_resize
@@ -2021,6 +2060,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def resize_instance(self, context, instance, image,
reservations=None, migration=None, migration_id=None,
@@ -2056,8 +2096,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self._terminate_volume_connections(context, instance)
- self.network_api.migrate_instance_start(context, instance,
- migration)
+ self.conductor_api.network_migrate_instance_start(context,
+ instance,
+ migration)
migration = self.conductor_api.migration_update(context,
migration, 'post-migrating')
@@ -2091,8 +2132,15 @@ class ComputeManager(manager.SchedulerDependentManager):
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
if old_instance_type_id != new_instance_type_id:
- instance_type = instance_types.get_instance_type(
- new_instance_type_id)
+ instance_type = instance_types.extract_instance_type(instance,
+ prefix='new_')
+ old_instance_type = instance_types.extract_instance_type(instance)
+ sys_meta = utils.metadata_to_dict(instance['system_metadata'])
+ instance_types.save_instance_type_info(sys_meta,
+ old_instance_type,
+ prefix='old_')
+ instance_types.save_instance_type_info(sys_meta, instance_type)
+
instance = self._instance_update(
context,
instance['uuid'],
@@ -2100,15 +2148,18 @@ class ComputeManager(manager.SchedulerDependentManager):
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
root_gb=instance_type['root_gb'],
- ephemeral_gb=instance_type['ephemeral_gb'])
+ ephemeral_gb=instance_type['ephemeral_gb'],
+ system_metadata=sys_meta)
+
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
- self.network_api.migrate_instance_finish(context, instance,
- migration)
+ self.conductor_api.network_migrate_instance_finish(context,
+ instance,
+ migration)
network_info = self._get_instance_nw_info(context, instance)
@@ -2154,6 +2205,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
reservations=None, migration=None, migration_id=None):
@@ -2192,9 +2244,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(
context, instance, "create_ip.start")
- self.network_api.add_fixed_ip_to_instance(context,
- instance,
- network_id)
+ self.network_api.add_fixed_ip_to_instance(context, instance,
+ network_id, conductor_api=self.conductor_api)
network_info = self._inject_network_info(context, instance=instance)
self.reset_network(context, instance)
@@ -2213,9 +2264,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
- self.network_api.remove_fixed_ip_from_instance(context,
- instance,
- address)
+ self.network_api.remove_fixed_ip_from_instance(context, instance,
+ address, conductor_api=self.conductor_api)
network_info = self._inject_network_info(context,
instance=instance)
@@ -2226,6 +2276,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
@@ -2243,6 +2294,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
@@ -2291,6 +2343,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
@@ -2310,6 +2363,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
+ @wrap_instance_event
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
@@ -2461,16 +2515,19 @@ class ComputeManager(manager.SchedulerDependentManager):
def do_reserve():
bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
context, instance)
- result = compute_utils.get_device_name_for_instance(context,
- instance,
- bdms,
- device)
+
+ device_name = compute_utils.get_device_name_for_instance(
+ context, instance, bdms, device)
+
# NOTE(vish): create bdm here to avoid race condition
values = {'instance_uuid': instance['uuid'],
'volume_id': volume_id or 'reserved',
- 'device_name': result}
+ 'device_name': device_name}
+
self.conductor_api.block_device_mapping_create(context, values)
- return result
+
+ return device_name
+
return do_reserve()
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -2547,15 +2604,15 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
locals(), context=context, instance=instance)
- if not self.driver.instance_exists(instance['name']):
- LOG.warn(_('Detaching volume from unknown instance'),
- context=context, instance=instance)
connection_info = jsonutils.loads(bdm['connection_info'])
# NOTE(vish): We currently don't use the serial when disconnecting,
# but added for completeness in case we ever do.
if connection_info and 'serial' not in connection_info:
connection_info['serial'] = volume_id
try:
+ if not self.driver.instance_exists(instance['name']):
+ LOG.warn(_('Detaching volume from unknown instance'),
+ context=context, instance=instance)
self.driver.detach_volume(connection_info,
instance,
mp)
@@ -2809,7 +2866,9 @@ class ComputeManager(manager.SchedulerDependentManager):
migration = {'source_compute': self.host,
'dest_compute': dest, }
- self.network_api.migrate_instance_start(ctxt, instance_ref, migration)
+ self.conductor_api.network_migrate_instance_start(ctxt,
+ instance_ref,
+ migration)
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
@@ -2864,7 +2923,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.host)
migration = {'source_compute': instance['host'],
'dest_compute': self.host, }
- self.network_api.migrate_instance_finish(context, instance, migration)
+ self.conductor_api.network_migrate_instance_finish(context,
+ instance,
+ migration)
network_info = self._get_instance_nw_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
@@ -3084,7 +3145,9 @@ class ComputeManager(manager.SchedulerDependentManager):
@manager.periodic_task
def _instance_usage_audit(self, context):
if CONF.instance_usage_audit:
- if not compute_utils.has_audit_been_run(context, self.host):
+ if not compute_utils.has_audit_been_run(context,
+ self.conductor_api,
+ self.host):
begin, end = utils.last_completed_audit_period()
capi = self.conductor_api
instances = capi.instance_get_active_by_window_joined(
@@ -3101,11 +3164,12 @@ class ComputeManager(manager.SchedulerDependentManager):
number_instances=num_instances))
start_time = time.time()
compute_utils.start_instance_usage_audit(context,
+ self.conductor_api,
begin, end,
self.host, num_instances)
for instance in instances:
try:
- compute_utils.notify_usage_exists(
+ self.conductor_api.notify_usage_exists(
context, instance,
ignore_missing_network_data=False)
successes += 1
@@ -3116,6 +3180,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
+ self.conductor_api,
begin, end,
self.host, errors,
"Instance usage audit ran "
@@ -3312,7 +3377,7 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance['state']
except exception.InstanceNotFound:
- vm_power_state = power_state.SHUTDOWN
+ vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
# We re-query the DB to get the latest instance info to minimize
@@ -3399,6 +3464,12 @@ class ComputeManager(manager.SchedulerDependentManager):
# instrumentation is done. See bug 1097806 for details.
LOG.warn(_("Instance is paused unexpectedly. Ignore."),
instance=db_instance)
+ elif vm_power_state == power_state.NOSTATE:
+ # Occasionally, depending on the status of the hypervisor,
+ # which could be restarting for example, an instance may
+                # not be found. Therefore just log the condition.
+ LOG.warn(_("Instance is unexpectedly not found. Ignore."),
+ instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index be0360185..825422e86 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -144,7 +144,8 @@ class ResourceTracker(object):
# Mark the resources in-use for the resize landing on this
# compute host:
- self._update_usage_from_migration(self.compute_node, migration_ref)
+ self._update_usage_from_migration(instance_ref, self.compute_node,
+ migration_ref)
elevated = context.elevated()
self._update(elevated, self.compute_node)
@@ -158,12 +159,7 @@ class ResourceTracker(object):
be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
- # TODO(russellb): no-db-compute: Send the old instance type
- # info that is needed via rpc so db access isn't required
- # here.
- old_instance_type_id = instance['instance_type_id']
- old_instance_type = instance_types.get_instance_type(
- old_instance_type_id)
+ old_instance_type = instance_types.extract_instance_type(instance)
return self.conductor_api.migration_create(context, instance,
{'dest_compute': self.host,
@@ -293,12 +289,14 @@ class ResourceTracker(object):
# Need to create the ComputeNode record:
resources['service_id'] = service['id']
self._create(context, resources)
- LOG.info(_('Compute_service record created for %s ') % self.host)
+ LOG.info(_('Compute_service record created for %(host)s:%(node)s')
+ % {'host': self.host, 'node': self.nodename})
else:
# just update the record:
self._update(context, resources, prune_stats=True)
- LOG.info(_('Compute_service record updated for %s ') % self.host)
+ LOG.info(_('Compute_service record updated for %(host)s:%(node)s')
+ % {'host': self.host, 'node': self.nodename})
def _create(self, context, values):
"""Create the compute node in the DB."""
@@ -377,7 +375,7 @@ class ResourceTracker(object):
resources['running_vms'] = self.stats.num_instances
resources['vcpus_used'] = self.stats.num_vcpus_used
- def _update_usage_from_migration(self, resources, migration):
+ def _update_usage_from_migration(self, instance, resources, migration):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
@@ -390,7 +388,7 @@ class ResourceTracker(object):
migration['source_node'] == self.nodename)
same_node = (incoming and outbound)
- instance = self.tracked_instances.get(uuid, None)
+ record = self.tracked_instances.get(uuid, None)
itype = None
if same_node:
@@ -398,27 +396,25 @@ class ResourceTracker(object):
# instance is *not* in:
if (instance['instance_type_id'] ==
migration['old_instance_type_id']):
-
- itype = migration['new_instance_type_id']
+ itype = instance_types.extract_instance_type(instance)
else:
# instance record already has new flavor, hold space for a
# possible revert to the old instance type:
- itype = migration['old_instance_type_id']
+ itype = instance_types.extract_instance_type(instance, 'old_')
- elif incoming and not instance:
+ elif incoming and not record:
# instance has not yet migrated here:
- itype = migration['new_instance_type_id']
+ itype = instance_types.extract_instance_type(instance, 'new_')
- elif outbound and not instance:
+ elif outbound and not record:
# instance migrated, but record usage for a possible revert:
- itype = migration['old_instance_type_id']
+ itype = instance_types.extract_instance_type(instance, 'old_')
if itype:
- instance_type = instance_types.get_instance_type(itype)
- self.stats.update_stats_for_migration(instance_type)
- self._update_usage(resources, instance_type)
+ self.stats.update_stats_for_migration(itype)
+ self._update_usage(resources, itype)
resources['stats'] = self.stats
- self.tracked_migrations[uuid] = (migration, instance_type)
+ self.tracked_migrations[uuid] = (migration, itype)
def _update_usage_from_migrations(self, resources, migrations):
@@ -451,7 +447,8 @@ class ResourceTracker(object):
for migration in filtered.values():
try:
- self._update_usage_from_migration(resources, migration)
+ self._update_usage_from_migration(instance, resources,
+ migration)
except exception.InstanceTypeNotFound:
LOG.warn(_("InstanceType could not be found, skipping "
"migration."), instance_uuid=uuid)
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 1874e886f..35139838d 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -22,13 +22,13 @@ import traceback
from nova import block_device
from nova.compute import instance_types
-from nova import db
from nova import exception
from nova.network import model as network_model
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import log
from nova.openstack.common.notifier import api as notifier_api
+from nova.openstack.common import timeutils
from nova import utils
from nova.virt import driver
@@ -37,7 +37,8 @@ CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
-def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
+def add_instance_fault_from_exc(context, conductor,
+ instance, fault, exc_info=None):
"""Adds the specified fault to the database."""
code = 500
@@ -61,7 +62,47 @@ def add_instance_fault_from_exc(context, instance, fault, exc_info=None):
'details': unicode(details),
'host': CONF.host
}
- db.instance_fault_create(context, values)
+ conductor.instance_fault_create(context, values)
+
+
+def pack_action_start(context, instance_uuid, action_name):
+ values = {'action': action_name,
+ 'instance_uuid': instance_uuid,
+ 'request_id': context.request_id,
+ 'user_id': context.user_id,
+ 'start_time': context.timestamp}
+ return values
+
+
+def pack_action_finish(context, instance_uuid):
+ values = {'instance_uuid': instance_uuid,
+ 'request_id': context.request_id,
+ 'finish_time': timeutils.utcnow()}
+ return values
+
+
+def pack_action_event_start(context, instance_uuid, event_name):
+ values = {'event': event_name,
+ 'instance_uuid': instance_uuid,
+ 'request_id': context.request_id,
+ 'start_time': timeutils.utcnow()}
+ return values
+
+
+def pack_action_event_finish(context, instance_uuid, event_name, exc_val=None,
+ exc_tb=None):
+ values = {'event': event_name,
+ 'instance_uuid': instance_uuid,
+ 'request_id': context.request_id,
+ 'finish_time': timeutils.utcnow()}
+ if exc_tb is None:
+ values['result'] = 'Success'
+ else:
+ values['result'] = 'Error'
+ values['message'] = str(exc_val)
+ values['traceback'] = ''.join(traceback.format_tb(exc_tb))
+
+ return values
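
A sketch of the records these pack_* helpers produce, assuming a minimal context object carrying the attributes used above:

    import collections

    Ctx = collections.namedtuple('Ctx', 'request_id user_id timestamp')
    ctx = Ctx('req-1', 'user-1', '2013-01-01T00:00:00')

    start = pack_action_start(ctx, 'instance-uuid', 'reboot')
    assert start['action'] == 'reboot'
    assert start['request_id'] == 'req-1'

    finish = pack_action_event_finish(ctx, 'instance-uuid', 'compute_reboot')
    assert finish['result'] == 'Success'   # no traceback was supplied
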
def get_device_name_for_instance(context, instance, bdms, device):
@@ -75,49 +116,57 @@ def get_device_name_for_instance(context, instance, bdms, device):
appropriate format.
"""
req_prefix = None
- req_letters = None
+ req_letter = None
+
if device:
try:
- req_prefix, req_letters = block_device.match_device(device)
+ req_prefix, req_letter = block_device.match_device(device)
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=device)
+
mappings = block_device.instance_block_mapping(instance, bdms)
+
try:
prefix = block_device.match_device(mappings['root'])[0]
except (TypeError, AttributeError, ValueError):
raise exception.InvalidDevicePath(path=mappings['root'])
+
# NOTE(vish): remove this when xenapi is setting default_root_device
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
prefix = '/dev/xvd'
+
if req_prefix != prefix:
LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s") % locals())
- letters_list = []
- for _name, device in mappings.iteritems():
- letter = block_device.strip_prefix(device)
+
+ used_letters = set()
+ for device_path in mappings.itervalues():
+ letter = block_device.strip_prefix(device_path)
# NOTE(vish): delete numbers in case we have something like
# /dev/sda1
letter = re.sub("\d+", "", letter)
- letters_list.append(letter)
- used_letters = set(letters_list)
+ used_letters.add(letter)
# NOTE(vish): remove this when xenapi is properly setting
# default_ephemeral_device and default_swap_device
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
- instance_type_id = instance['instance_type_id']
- instance_type = instance_types.get_instance_type(instance_type_id)
+ instance_type = instance_types.extract_instance_type(instance)
if instance_type['ephemeral_gb']:
- used_letters.update('b')
+ used_letters.add('b')
+
if instance_type['swap']:
- used_letters.update('c')
+ used_letters.add('c')
- if not req_letters:
- req_letters = _get_unused_letters(used_letters)
- if req_letters in used_letters:
+ if not req_letter:
+ req_letter = _get_unused_letter(used_letters)
+
+ if req_letter in used_letters:
raise exception.DevicePathInUse(path=device)
- return prefix + req_letters
+
+ device_name = prefix + req_letter
+ return device_name
-def _get_unused_letters(used_letters):
+def _get_unused_letter(used_letters):
doubles = [first + second for second in string.ascii_lowercase
for first in string.ascii_lowercase]
all_letters = set(list(string.ascii_lowercase) + doubles)
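The diff context cuts _get_unused_letter off here; the remainder is assumed to return the first name in all_letters that is not in used_letters. A worked example:

    # Sketch: with root at /dev/vda plus /dev/vdb and /dev/vdc mapped,
    # used_letters == {'a', 'b', 'c'} and the next device becomes
    # prefix + 'd', i.e. '/dev/vdd'. After 'z' is taken, the two-letter
    # names ('aa', 'ba', ...) generated above come into play.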
@@ -205,24 +254,27 @@ def get_nw_info_for_instance(instance):
return network_model.NetworkInfo.hydrate(cached_nwinfo)
-def has_audit_been_run(context, host, timestamp=None):
+def has_audit_been_run(context, conductor, host, timestamp=None):
begin, end = utils.last_completed_audit_period(before=timestamp)
- task_log = db.task_log_get(context, "instance_usage_audit",
- begin, end, host)
+ task_log = conductor.task_log_get(context, "instance_usage_audit",
+ begin, end, host)
if task_log:
return True
else:
return False
-def start_instance_usage_audit(context, begin, end, host, num_instances):
- db.task_log_begin_task(context, "instance_usage_audit", begin, end, host,
- num_instances, "Instance usage audit started...")
+def start_instance_usage_audit(context, conductor, begin, end, host,
+ num_instances):
+ conductor.task_log_begin_task(context, "instance_usage_audit", begin,
+ end, host, num_instances,
+ "Instance usage audit started...")
-def finish_instance_usage_audit(context, begin, end, host, errors, message):
- db.task_log_end_task(context, "instance_usage_audit", begin, end, host,
- errors, message)
+def finish_instance_usage_audit(context, conductor, begin, end, host, errors,
+ message):
+ conductor.task_log_end_task(context, "instance_usage_audit", begin, end,
+ host, errors, message)
def usage_volume_info(vol_usage):
@@ -246,3 +298,28 @@ def usage_volume_info(vol_usage):
vol_usage['curr_write_bytes'])
return usage_info
+
+
+class EventReporter(object):
+ """Context manager to report instance action events."""
+
+ def __init__(self, context, conductor, event_name, *instance_uuids):
+ self.context = context
+ self.conductor = conductor
+ self.event_name = event_name
+ self.instance_uuids = instance_uuids
+
+ def __enter__(self):
+ for uuid in self.instance_uuids:
+ event = pack_action_event_start(self.context, uuid,
+ self.event_name)
+ self.conductor.action_event_start(self.context, event)
+
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ for uuid in self.instance_uuids:
+ event = pack_action_event_finish(self.context, uuid,
+ self.event_name, exc_val, exc_tb)
+ self.conductor.action_event_finish(self.context, event)
+ return False
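A minimal usage sketch for the context manager (hypothetical call site; conductor_api and do_reboot stand in for a real conductor handle and compute operation):

    # Hypothetical call site, not part of the patch: an event is opened
    # for every uuid on entry and closed on exit; an exception sets the
    # 'Error' result and still propagates, since __exit__ returns False.
    with EventReporter(context, conductor_api, 'compute_reboot',
                       instance['uuid']):
        do_reboot(context, instance)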
diff --git a/nova/compute/vm_mode.py b/nova/compute/vm_mode.py
index 26e5ad8a0..cc1ca6978 100644
--- a/nova/compute/vm_mode.py
+++ b/nova/compute/vm_mode.py
@@ -52,7 +52,7 @@ def get_from_instance(instance):
if mode == "hv":
mode = HVM
- if not mode in ALL:
+ if mode not in ALL:
raise exception.Invalid("Unknown vm mode '%s'" % mode)
return mode
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index d05c94877..50f59d9d6 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -14,14 +14,13 @@
"""Handles all requests to the conductor service."""
-import functools
-
from nova.conductor import manager
from nova.conductor import rpcapi
from nova import exception as exc
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
+from nova import utils
conductor_opts = [
cfg.BoolOpt('use_local',
@@ -43,25 +42,6 @@ CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
-class ExceptionHelper(object):
- """Class to wrap another and translate the ClientExceptions raised by its
- function calls to the actual ones"""
-
- def __init__(self, target):
- self._target = target
-
- def __getattr__(self, name):
- func = getattr(self._target, name)
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except rpc_common.ClientException, e:
- raise (e._exc_info[1], None, e._exc_info[2])
- return wrapper
-
-
class LocalAPI(object):
"""A local version of the conductor API that does database updates
locally instead of via RPC"""
@@ -69,7 +49,7 @@ class LocalAPI(object):
def __init__(self):
# TODO(danms): This needs to be something more generic for
# other/future users of this sort of functionality.
- self._manager = ExceptionHelper(manager.ConductorManager())
+ self._manager = utils.ExceptionHelper(manager.ConductorManager())
def wait_until_ready(self, context, *args, **kwargs):
# nothing to wait for in the local case.
@@ -80,7 +60,8 @@ class LocalAPI(object):
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
- return self._manager.instance_update(context, instance_uuid, updates)
+ return self._manager.instance_update(context, instance_uuid,
+ updates, 'compute')
def instance_get(self, context, instance_id):
return self._manager.instance_get(context, instance_id)
@@ -112,11 +93,6 @@ class LocalAPI(object):
return self._manager.instance_get_all_hung_in_rebooting(context,
timeout)
- def instance_get_active_by_window(self, context, begin, end=None,
- project_id=None, host=None):
- return self._manager.instance_get_active_by_window(
- context, begin, end, project_id, host)
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self._manager.instance_get_active_by_window_joined(
@@ -133,6 +109,9 @@ class LocalAPI(object):
def instance_type_get(self, context, instance_type_id):
return self._manager.instance_type_get(context, instance_type_id)
+ def instance_fault_create(self, context, values):
+ return self._manager.instance_fault_create(context, values)
+
def migration_get(self, context, migration_id):
return self._manager.migration_get(context, migration_id)
@@ -293,6 +272,53 @@ class LocalAPI(object):
def service_update(self, context, service, values):
return self._manager.service_update(context, service, values)
+ def task_log_get(self, context, task_name, begin, end, host, state=None):
+ return self._manager.task_log_get(context, task_name, begin, end,
+ host, state)
+
+ def task_log_begin_task(self, context, task_name, begin, end, host,
+ task_items=None, message=None):
+ return self._manager.task_log_begin_task(context, task_name,
+ begin, end, host,
+ task_items, message)
+
+ def task_log_end_task(self, context, task_name, begin, end, host,
+ errors, message=None):
+ return self._manager.task_log_end_task(context, task_name,
+ begin, end, host,
+ errors, message)
+
+ def notify_usage_exists(self, context, instance, current_period=False,
+ ignore_missing_network_data=True,
+ system_metadata=None, extra_usage_info=None):
+ return self._manager.notify_usage_exists(
+ context, instance, current_period, ignore_missing_network_data,
+ system_metadata, extra_usage_info)
+
+ def security_groups_trigger_handler(self, context, event, *args):
+ return self._manager.security_groups_trigger_handler(context,
+ event, args)
+
+ def security_groups_trigger_members_refresh(self, context, group_ids):
+ return self._manager.security_groups_trigger_members_refresh(context,
+ group_ids)
+
+ def network_migrate_instance_start(self, context, instance, migration):
+ return self._manager.network_migrate_instance_start(context,
+ instance,
+ migration)
+
+ def network_migrate_instance_finish(self, context, instance, migration):
+ return self._manager.network_migrate_instance_finish(context,
+ instance,
+ migration)
+
+ def quota_commit(self, context, reservations):
+ return self._manager.quota_commit(context, reservations)
+
+ def quota_rollback(self, context, reservations):
+ return self._manager.quota_rollback(context, reservations)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager."""
@@ -335,7 +361,7 @@ class API(object):
def instance_update(self, context, instance_uuid, **updates):
"""Perform an instance update in the database."""
return self.conductor_rpcapi.instance_update(context, instance_uuid,
- updates)
+ updates, 'conductor')
def instance_destroy(self, context, instance):
return self.conductor_rpcapi.instance_destroy(context, instance)
@@ -369,11 +395,6 @@ class API(object):
return self.conductor_rpcapi.instance_get_all_hung_in_rebooting(
context, timeout)
- def instance_get_active_by_window(self, context, begin, end=None,
- project_id=None, host=None):
- return self.conductor_rpcapi.instance_get_active_by_window(
- context, begin, end, project_id, host)
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
return self.conductor_rpcapi.instance_get_active_by_window_joined(
@@ -391,6 +412,9 @@ class API(object):
return self.conductor_rpcapi.instance_type_get(context,
instance_type_id)
+ def instance_fault_create(self, context, values):
+ return self.conductor_rpcapi.instance_fault_create(context, values)
+
def migration_get(self, context, migration_id):
return self.conductor_rpcapi.migration_get(context, migration_id)
@@ -564,3 +588,51 @@ class API(object):
def service_update(self, context, service, values):
return self.conductor_rpcapi.service_update(context, service, values)
+
+ def task_log_get(self, context, task_name, begin, end, host, state=None):
+ return self.conductor_rpcapi.task_log_get(context, task_name, begin,
+ end, host, state)
+
+ def task_log_begin_task(self, context, task_name, begin, end, host,
+ task_items=None, message=None):
+ return self.conductor_rpcapi.task_log_begin_task(context, task_name,
+ begin, end, host,
+ task_items, message)
+
+ def task_log_end_task(self, context, task_name, begin, end, host,
+ errors, message=None):
+ return self.conductor_rpcapi.task_log_end_task(context, task_name,
+ begin, end, host,
+ errors, message)
+
+ def notify_usage_exists(self, context, instance, current_period=False,
+ ignore_missing_network_data=True,
+ system_metadata=None, extra_usage_info=None):
+ return self.conductor_rpcapi.notify_usage_exists(
+ context, instance, current_period, ignore_missing_network_data,
+ system_metadata, extra_usage_info)
+
+ def security_groups_trigger_handler(self, context, event, *args):
+ return self.conductor_rpcapi.security_groups_trigger_handler(context,
+ event,
+ args)
+
+ def security_groups_trigger_members_refresh(self, context, group_ids):
+ return self.conductor_rpcapi.security_groups_trigger_members_refresh(
+ context, group_ids)
+
+ def network_migrate_instance_start(self, context, instance, migration):
+ return self.conductor_rpcapi.network_migrate_instance_start(context,
+ instance,
+ migration)
+
+ def network_migrate_instance_finish(self, context, instance, migration):
+ return self.conductor_rpcapi.network_migrate_instance_finish(context,
+ instance,
+ migration)
+
+ def quota_commit(self, context, reservations):
+ return self.conductor_rpcapi.quota_commit(context, reservations)
+
+ def quota_rollback(self, context, reservations):
+ return self.conductor_rpcapi.quota_rollback(context, reservations)
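Both classes expose the same surface: LocalAPI calls the manager in-process, while API round-trips through the conductor service over RPC. A sketch of how a caller would pick between them using the use_local option registered above (the real selection helper lives outside this hunk):

    # Sketch, assuming CONF.conductor.use_local as registered above.
    def _conductor_api():
        if CONF.conductor.use_local:
            return LocalAPI()
        return API()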
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 87b143912..905d2e2cd 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -14,14 +14,17 @@
"""Handles database requests from other nova services."""
+from nova.compute import api as compute_api
+from nova.compute import utils as compute_utils
from nova import exception
from nova import manager
+from nova import network
from nova import notifications
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
-
+from nova import quota
LOG = logging.getLogger(__name__)
@@ -34,6 +37,7 @@ allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
'default_swap_device', 'root_device_name',
+ 'system_metadata',
]
# Fields that we want to convert back into a datetime object.
@@ -43,11 +47,23 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD."""
- RPC_API_VERSION = '1.35'
+ RPC_API_VERSION = '1.41'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
+ self.security_group_api = compute_api.SecurityGroupAPI()
+ self._network_api = None
+ self.quotas = quota.QUOTAS
+
+ @property
+ def network_api(self):
+ # NOTE(danms): We need to instantiate our network_api on first use
+ # to avoid the circular dependency that exists between our init
+ # and network_api's
+ if self._network_api is None:
+ self._network_api = network.API()
+ return self._network_api
def ping(self, context, arg):
return jsonutils.to_primitive({'service': 'conductor', 'arg': arg})
@@ -56,7 +72,8 @@ class ConductorManager(manager.SchedulerDependentManager):
exception.InvalidUUID,
exception.InstanceNotFound,
exception.UnexpectedTaskStateError)
- def instance_update(self, context, instance_uuid, updates):
+ def instance_update(self, context, instance_uuid,
+ updates, service=None):
for key, value in updates.iteritems():
if key not in allowed_updates:
LOG.error(_("Instance update attempted for "
@@ -67,7 +84,7 @@ class ConductorManager(manager.SchedulerDependentManager):
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance_uuid, updates)
- notifications.send_update(context, old_ref, instance_ref)
+ notifications.send_update(context, old_ref, instance_ref, service)
return jsonutils.to_primitive(instance_ref)
@rpc_common.client_exceptions(exception.InstanceNotFound)
@@ -234,6 +251,7 @@ class ConductorManager(manager.SchedulerDependentManager):
def instance_get_active_by_window(self, context, begin, end=None,
project_id=None, host=None):
+ # Unused, but cannot remove until major RPC version bump
result = self.db.instance_get_active_by_window(context, begin, end,
project_id, host)
return jsonutils.to_primitive(result)
@@ -258,6 +276,10 @@ class ConductorManager(manager.SchedulerDependentManager):
result = self.db.instance_type_get(context, instance_type_id)
return jsonutils.to_primitive(result)
+ def instance_fault_create(self, context, values):
+ result = self.db.instance_fault_create(context, values)
+ return jsonutils.to_primitive(result)
+
def vol_get_usage_by_time(self, context, start_time):
result = self.db.vol_get_usage_by_time(context, start_time)
return jsonutils.to_primitive(result)
@@ -319,3 +341,46 @@ class ConductorManager(manager.SchedulerDependentManager):
def service_update(self, context, service, values):
svc = self.db.service_update(context, service['id'], values)
return jsonutils.to_primitive(svc)
+
+ def task_log_get(self, context, task_name, begin, end, host, state=None):
+ result = self.db.task_log_get(context, task_name, begin, end, host,
+ state)
+ return jsonutils.to_primitive(result)
+
+ def task_log_begin_task(self, context, task_name, begin, end, host,
+ task_items=None, message=None):
+ result = self.db.task_log_begin_task(context.elevated(), task_name,
+ begin, end, host, task_items,
+ message)
+ return jsonutils.to_primitive(result)
+
+ def task_log_end_task(self, context, task_name, begin, end, host,
+ errors, message=None):
+ result = self.db.task_log_end_task(context.elevated(), task_name,
+ begin, end, host, errors, message)
+ return jsonutils.to_primitive(result)
+
+ def notify_usage_exists(self, context, instance, current_period=False,
+ ignore_missing_network_data=True,
+ system_metadata=None, extra_usage_info=None):
+ compute_utils.notify_usage_exists(context, instance, current_period,
+ ignore_missing_network_data,
+ system_metadata, extra_usage_info)
+
+ def security_groups_trigger_handler(self, context, event, args):
+ self.security_group_api.trigger_handler(event, context, *args)
+
+ def security_groups_trigger_members_refresh(self, context, group_ids):
+ self.security_group_api.trigger_members_refresh(context, group_ids)
+
+ def network_migrate_instance_start(self, context, instance, migration):
+ self.network_api.migrate_instance_start(context, instance, migration)
+
+ def network_migrate_instance_finish(self, context, instance, migration):
+ self.network_api.migrate_instance_finish(context, instance, migration)
+
+ def quota_commit(self, context, reservations):
+ quota.QUOTAS.commit(context, reservations)
+
+ def quota_rollback(self, context, reservations):
+ quota.QUOTAS.rollback(context, reservations)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 1699c85ed..58613e59a 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -68,6 +68,16 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.33 - Added compute_node_create and compute_node_update
1.34 - Added service_update
1.35 - Added instance_get_active_by_window_joined
+ 1.36 - Added instance_fault_create
+ 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
+ 1.38 - Added service name to instance_update
+ 1.39 - Added notify_usage_exists
+ 1.40 - Added security_groups_trigger_handler and
+ security_groups_trigger_members_refresh
+ Removed instance_get_active_by_window
+ 1.41 - Added fixed_ip_get_by_instance, network_get,
+ instance_floating_address_get_all, quota_commit,
+ quota_rollback
"""
BASE_RPC_API_VERSION = '1.0'
@@ -82,12 +92,15 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
msg = self.make_msg('ping', arg=arg_p)
return self.call(context, msg, version='1.22', timeout=timeout)
- def instance_update(self, context, instance_uuid, updates):
+ def instance_update(self, context, instance_uuid, updates,
+ service=None):
updates_p = jsonutils.to_primitive(updates)
return self.call(context,
self.make_msg('instance_update',
instance_uuid=instance_uuid,
- updates=updates_p))
+ updates=updates_p,
+ service=service),
+ version='1.38')
def instance_get(self, context, instance_id):
msg = self.make_msg('instance_get',
@@ -235,13 +248,6 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
timeout=timeout)
return self.call(context, msg, version='1.15')
- def instance_get_active_by_window(self, context, begin, end=None,
- project_id=None, host=None):
- msg = self.make_msg('instance_get_active_by_window',
- begin=begin, end=end, project_id=project_id,
- host=host)
- return self.call(context, msg, version='1.15')
-
def instance_get_active_by_window_joined(self, context, begin, end=None,
project_id=None, host=None):
msg = self.make_msg('instance_get_active_by_window_joined',
@@ -293,12 +299,18 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
msg = self.make_msg('instance_get_all_by_host', host=host, node=node)
return self.call(context, msg, version='1.32')
+ def instance_fault_create(self, context, values):
+ msg = self.make_msg('instance_fault_create', values=values)
+ return self.call(context, msg, version='1.36')
+
def action_event_start(self, context, values):
- msg = self.make_msg('action_event_start', values=values)
+ values_p = jsonutils.to_primitive(values)
+ msg = self.make_msg('action_event_start', values=values_p)
return self.call(context, msg, version='1.25')
def action_event_finish(self, context, values):
- msg = self.make_msg('action_event_finish', values=values)
+ values_p = jsonutils.to_primitive(values)
+ msg = self.make_msg('action_event_finish', values=values_p)
return self.call(context, msg, version='1.25')
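The to_primitive calls exist because the values dicts built by compute.utils.pack_action_event_* now carry datetime objects from timeutils.utcnow(), which the RPC serializer cannot ship as-is; the convert_datetimes calls added to nova/db/sqlalchemy/api.py below turn the resulting strings back into datetimes. A sketch of the round trip (convert_datetimes is defined elsewhere in that module):

    # Sketch of the wire round trip for event timestamps.
    values = {'event': 'compute_reboot', 'start_time': timeutils.utcnow()}
    wire = jsonutils.to_primitive(values)   # datetime -> ISO8601 string
    # ... rpc call ...
    convert_datetimes(wire, 'start_time')   # string -> datetime, in place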
def instance_info_cache_update(self, context, instance, values):
@@ -330,3 +342,70 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
service_p = jsonutils.to_primitive(service)
msg = self.make_msg('service_update', service=service_p, values=values)
return self.call(context, msg, version='1.34')
+
+ def task_log_get(self, context, task_name, begin, end, host, state=None):
+ msg = self.make_msg('task_log_get', task_name=task_name,
+ begin=begin, end=end, host=host, state=state)
+ return self.call(context, msg, version='1.37')
+
+ def task_log_begin_task(self, context, task_name, begin, end, host,
+ task_items=None, message=None):
+ msg = self.make_msg('task_log_begin_task', task_name=task_name,
+ begin=begin, end=end, host=host,
+ task_items=task_items, message=message)
+ return self.call(context, msg, version='1.37')
+
+ def task_log_end_task(self, context, task_name, begin, end, host, errors,
+ message=None):
+ msg = self.make_msg('task_log_end_task', task_name=task_name,
+ begin=begin, end=end, host=host, errors=errors,
+ message=message)
+ return self.call(context, msg, version='1.37')
+
+ def notify_usage_exists(self, context, instance, current_period=False,
+ ignore_missing_network_data=True,
+ system_metadata=None, extra_usage_info=None):
+ instance_p = jsonutils.to_primitive(instance)
+ system_metadata_p = jsonutils.to_primitive(system_metadata)
+ extra_usage_info_p = jsonutils.to_primitive(extra_usage_info)
+ msg = self.make_msg('notify_usage_exists', instance=instance_p,
+ current_period=current_period,
+ ignore_missing_network_data=ignore_missing_network_data,
+ system_metadata=system_metadata_p,
+ extra_usage_info=extra_usage_info_p)
+ return self.call(context, msg, version='1.39')
+
+ def security_groups_trigger_handler(self, context, event, args):
+ args_p = jsonutils.to_primitive(args)
+ msg = self.make_msg('security_groups_trigger_handler', event=event,
+ args=args_p)
+ return self.call(context, msg, version='1.40')
+
+ def security_groups_trigger_members_refresh(self, context, group_ids):
+ msg = self.make_msg('security_groups_trigger_members_refresh',
+ group_ids=group_ids)
+ return self.call(context, msg, version='1.40')
+
+ def network_migrate_instance_start(self, context, instance, migration):
+ instance_p = jsonutils.to_primitive(instance)
+ migration_p = jsonutils.to_primitive(migration)
+ msg = self.make_msg('network_migrate_instance_start',
+ instance=instance_p, migration=migration_p)
+ return self.call(context, msg, version='1.41')
+
+ def network_migrate_instance_finish(self, context, instance, migration):
+ instance_p = jsonutils.to_primitive(instance)
+ migration_p = jsonutils.to_primitive(migration)
+ msg = self.make_msg('network_migrate_instance_finish',
+ instance=instance_p, migration=migration_p)
+ return self.call(context, msg, version='1.41')
+
+ def quota_commit(self, context, reservations):
+ reservations_p = jsonutils.to_primitive(reservations)
+ msg = self.make_msg('quota_commit', reservations=reservations_p)
+ return self.call(context, msg, version='1.41')
+
+ def quota_rollback(self, context, reservations):
+ reservations_p = jsonutils.to_primitive(reservations)
+ msg = self.make_msg('quota_rollback', reservations=reservations_p)
+ return self.call(context, msg, version='1.41')
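Note the pattern used throughout: every method pins version= to the revision in the history list that introduced it, so a call against an older conductor fails fast instead of dispatching to a missing handler. A hypothetical future addition would follow the same shape:

    # Hypothetical sketch only: a future method bumps RPC_API_VERSION,
    # documents itself in the history list, and pins its own floor.
    def example_method(self, context, thing_id):
        msg = self.make_msg('example_method', thing_id=thing_id)
        return self.call(context, msg, version='1.42')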
diff --git a/nova/config.py b/nova/config.py
index 4095dba75..18147bdbb 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -18,10 +18,16 @@
# under the License.
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import rpc
+from nova import paths
+
+_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
def parse_args(argv, default_config_files=None):
+ db_session.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
+ sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova')
cfg.CONF(argv[1:],
project='nova',
diff --git a/nova/context.py b/nova/context.py
index 1a566cb5a..60fd5b4c0 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -46,7 +46,7 @@ class RequestContext(object):
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
- service_catalog=None, instance_lock_checked=False, **kwargs):
+ service_catalog=[], instance_lock_checked=False, **kwargs):
"""
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
@@ -65,9 +65,6 @@ class RequestContext(object):
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
- self.is_admin = is_admin
- if self.is_admin is None:
- self.is_admin = policy.check_is_admin(self.roles)
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
@@ -79,7 +76,9 @@ class RequestContext(object):
request_id = generate_request_id()
self.request_id = request_id
self.auth_token = auth_token
- self.service_catalog = service_catalog
+ # Only include required parts of service_catalog
+ self.service_catalog = [s for s in service_catalog
+ if s.get('type') in ('volume',)]
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
@@ -88,7 +87,9 @@ class RequestContext(object):
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
-
+ self.is_admin = is_admin
+ if self.is_admin is None:
+ self.is_admin = policy.check_is_admin(self)
if overwrite or not hasattr(local.store, 'context'):
self.update_store()
diff --git a/nova/crypto.py b/nova/crypto.py
index 5c48c60b6..96e545893 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -24,9 +24,15 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
from __future__ import absolute_import
+import base64
import hashlib
import os
+import re
import string
+import struct
+
+from pyasn1.codec.der import encoder as der_encoder
+from pyasn1.type import univ
from nova import context
from nova import db
@@ -181,23 +187,75 @@ def decrypt_text(project_id, text):
raise exception.DecryptionFailure(reason=exc.stderr)
+_RSA_OID = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+
+
+def _to_sequence(*vals):
+ seq = univ.Sequence()
+ for i in range(len(vals)):
+ seq.setComponentByPosition(i, vals[i])
+ return seq
+
+
+def convert_from_sshrsa_to_pkcs8(pubkey):
+ """Convert a ssh public key to openssl format
+ Equivalent to the ssh-keygen's -m option
+ """
+ # get the second field from the public key file.
+ try:
+ keydata = base64.b64decode(pubkey.split(None)[1])
+ except IndexError:
+ msg = _("Unable to find the key")
+ raise exception.EncryptionFailure(reason=msg)
+
+ # decode the parts of the key
+ parts = []
+ while keydata:
+ dlen = struct.unpack('>I', keydata[:4])[0]
+ data = keydata[4:dlen + 4]
+ keydata = keydata[4 + dlen:]
+ parts.append(data)
+
+ # Use asn to build the openssl key structure
+ #
+ # SEQUENCE(2 elem)
+ # +- SEQUENCE(2 elem)
+ # | +- OBJECT IDENTIFIER (1.2.840.113549.1.1.1)
+ # | +- NULL
+ # +- BIT STRING(1 elem)
+ # +- SEQUENCE(2 elem)
+ # +- INTEGER(2048 bit)
+ # +- INTEGER 65537
+
+ # Build the sequence for the bit string
+ n_val = int(
+ ''.join(['%02X' % struct.unpack('B', x)[0] for x in parts[2]]), 16)
+ e_val = int(
+ ''.join(['%02X' % struct.unpack('B', x)[0] for x in parts[1]]), 16)
+ pkinfo = _to_sequence(univ.Integer(n_val), univ.Integer(e_val))
+
+ # Convert the sequence into a bit string
+ pklong = long(der_encoder.encode(pkinfo).encode('hex'), 16)
+ pkbitstring = univ.BitString("'00%s'B" % bin(pklong)[2:])
+
+ # Build the key data structure
+ oid = _to_sequence(_RSA_OID, univ.Null())
+ pkcs1_seq = _to_sequence(oid, pkbitstring)
+ pkcs8 = base64.encodestring(der_encoder.encode(pkcs1_seq))
+
+ # Remove the embedded new line and format the key, each line
+ # should be 64 characters long
+ return ('-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----\n' %
+ re.sub("(.{64})", "\\1\n", pkcs8.replace('\n', ''), re.DOTALL))
+
+
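The pure-python conversion replaces the ssh-keygen subprocess below. A quick sanity check, as a sketch (the key path is made up; compare against ssh-keygen's PKCS8 export):

    # Sketch, not part of the patch: output should match
    #   ssh-keygen -P '' -e -f /tmp/id_rsa.pub -m PKCS8
    pub = open('/tmp/id_rsa.pub').read()
    print convert_from_sshrsa_to_pkcs8(pub)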
def ssh_encrypt_text(ssh_public_key, text):
"""Encrypt text with an ssh public key.
-
- Requires recent ssh-keygen binary in addition to openssl binary.
"""
with utils.tempdir() as tmpdir:
- sshkey = os.path.abspath(os.path.join(tmpdir, 'ssh.key'))
- with open(sshkey, 'w') as f:
- f.write(ssh_public_key)
sslkey = os.path.abspath(os.path.join(tmpdir, 'ssl.key'))
try:
- # NOTE(vish): -P is to skip prompt on bad keys
- out, _err = utils.execute('ssh-keygen',
- '-P', '',
- '-e',
- '-f', sshkey,
- '-m', 'PKCS8')
+ out = convert_from_sshrsa_to_pkcs8(ssh_public_key)
with open(sslkey, 'w') as f:
f.write(out)
enc, _err = utils.execute('openssl',
diff --git a/nova/db/api.py b/nova/db/api.py
index 6af81e5f9..ffd153a46 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -132,15 +132,6 @@ def service_get_all(context, disabled=None):
return IMPL.service_get_all(context, disabled)
-def service_does_host_exist(context, host_name, include_disabled=False):
- """Returns True if 'host_name' is found in the services table, False
- otherwise
- :param: host_name - the name of the host we want to check if it exists
- :param: include_disabled - Set to True to include hosts from disabled
- services"""
- return IMPL.service_does_host_exist(context, host_name, include_disabled)
-
-
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
@@ -159,15 +150,6 @@ def service_get_by_compute_host(context, host):
return IMPL.service_get_by_compute_host(context, host)
-def service_get_all_compute_sorted(context):
- """Get all compute services sorted by instance count.
-
- :returns: a list of (Service, instance_count) tuples.
-
- """
- return IMPL.service_get_all_compute_sorted(context)
-
-
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
@@ -305,7 +287,7 @@ def floating_ip_destroy(context, address):
def floating_ip_disassociate(context, address):
"""Disassociate a floating ip from a fixed ip by address.
- :returns: the address of the previous fixed ip or None
+ :returns: the fixed ip record joined to network record or None
if the floating ip was not associated to a fixed ip.
"""
@@ -316,7 +298,7 @@ def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
"""Associate a floating ip to a fixed_ip by address.
- :returns: the address of the new fixed ip (fixed_address) or None
+ :returns: the fixed ip record joined to network record or None
if the ip was already associated to the fixed ip.
"""
@@ -477,9 +459,12 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
-def fixed_ip_get(context, id):
- """Get fixed ip by id or raise if it does not exist."""
- return IMPL.fixed_ip_get(context, id)
+def fixed_ip_get(context, id, get_network=False):
+ """Get fixed ip by id or raise if it does not exist.
+
+ If get_network is true, also return the associated network.
+ """
+ return IMPL.fixed_ip_get(context, id, get_network)
def fixed_ip_get_all(context):
@@ -621,17 +606,6 @@ def instance_get_all_by_filters(context, filters, sort_key='created_at',
marker=marker)
-def instance_get_active_by_window(context, begin, end=None, project_id=None,
- host=None):
- """Get instances active during a certain time window.
-
- Specifying a project_id will filter for a certain project.
- Specifying a host will filter for instances on a given compute host.
- """
- return IMPL.instance_get_active_by_window(context, begin, end,
- project_id, host)
-
-
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Get instances and joins active during a certain time window.
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index a6f585eef..d0a58e44f 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -35,14 +35,14 @@ from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import func
from nova import block_device
-from nova.common import sqlalchemyutils
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova.db.sqlalchemy import models
-from nova.db.sqlalchemy.session import get_session
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session as db_session
+from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
@@ -58,10 +58,13 @@ db_opts = [
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
-CONF.import_opt('sql_connection', 'nova.db.sqlalchemy.session')
+CONF.import_opt('sql_connection',
+ 'nova.openstack.common.db.sqlalchemy.session')
LOG = logging.getLogger(__name__)
+get_session = db_session.get_session
+
def is_user_context(context):
"""Indicates if the request context is a normal user."""
@@ -397,45 +400,6 @@ def service_get_by_compute_host(context, host):
@require_admin_context
-def _service_get_all_topic_subquery(context, session, topic, subq, label):
- sort_value = getattr(subq.c, label)
- return model_query(context, models.Service,
- func.coalesce(sort_value, 0),
- session=session, read_deleted="no").\
- filter_by(topic=topic).\
- filter_by(disabled=False).\
- outerjoin((subq, models.Service.host == subq.c.host)).\
- order_by(sort_value).\
- all()
-
-
-@require_admin_context
-def service_get_all_compute_sorted(context):
- session = get_session()
- with session.begin():
- # NOTE(vish): The intended query is below
- # SELECT services.*, COALESCE(inst_cores.instance_cores,
- # 0)
- # FROM services LEFT OUTER JOIN
- # (SELECT host, SUM(instances.vcpus) AS instance_cores
- # FROM instances GROUP BY host) AS inst_cores
- # ON services.host = inst_cores.host
- topic = CONF.compute_topic
- label = 'instance_cores'
- subq = model_query(context, models.Instance.host,
- func.sum(models.Instance.vcpus).label(label),
- base_model=models.Instance, session=session,
- read_deleted="no").\
- group_by(models.Instance.host).\
- subquery()
- return _service_get_all_topic_subquery(context,
- session,
- topic,
- subq,
- label)
-
-
-@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
@@ -666,6 +630,7 @@ def certificate_get_all_by_user_and_project(context, user_id, project_id):
def floating_ip_get(context, id):
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
+ options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
@@ -793,15 +758,16 @@ def floating_ip_fixed_ip_associate(context, floating_address,
floating_ip_ref = _floating_ip_get_by_address(context,
floating_address,
session=session)
- fixed_ip_ref = fixed_ip_get_by_address(context,
- fixed_address,
- session=session)
+ fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
+ filter_by(address=fixed_address).\
+ options(joinedload('network')).\
+ first()
if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]:
return None
floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"]
floating_ip_ref.host = host
floating_ip_ref.save(session=session)
- return fixed_address
+ return fixed_ip_ref
@require_context
@@ -834,15 +800,12 @@ def floating_ip_disassociate(context, address):
fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
+ options(joinedload('network')).\
first()
- if fixed_ip_ref:
- fixed_ip_address = fixed_ip_ref['address']
- else:
- fixed_ip_address = None
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
floating_ip_ref.save(session=session)
- return fixed_ip_address
+ return fixed_ip_ref
@require_context
@@ -882,6 +845,7 @@ def floating_ip_get_all_by_project(context, project_id):
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
+ options(joinedload_all('fixed_ip.instance')).\
all()
@@ -892,8 +856,14 @@ def floating_ip_get_by_address(context, address):
@require_context
def _floating_ip_get_by_address(context, address, session=None):
+
+ # if address string is empty explicitly set it to None
+ if not address:
+ address = None
+
result = model_query(context, models.FloatingIp, session=session).\
filter_by(address=address).\
+ options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
@@ -1140,10 +1110,11 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
@require_context
-def fixed_ip_get(context, id):
- result = model_query(context, models.FixedIp).\
- filter_by(id=id).\
- first()
+def fixed_ip_get(context, id, get_network=False):
+ query = model_query(context, models.FixedIp).filter_by(id=id)
+ if get_network:
+ query = query.options(joinedload('network'))
+ result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
@@ -1245,7 +1216,7 @@ def fixed_ip_get_by_network_host(context, network_id, host):
first()
if not result:
- raise exception.FixedIpNotFoundForNetworkHost(network_uuid=network_id,
+ raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@@ -1283,7 +1254,7 @@ def virtual_interface_create(context, values):
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save()
- except exception.DBError:
+ except db_session.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
@@ -1685,25 +1656,6 @@ def regex_filter(query, model, filters):
return query
-@require_context
-def instance_get_active_by_window(context, begin, end=None,
- project_id=None, host=None):
- """Return instances that were active during window."""
- session = get_session()
- query = session.query(models.Instance)
-
- query = query.filter(or_(models.Instance.terminated_at == None,
- models.Instance.terminated_at > begin))
- if end:
- query = query.filter(models.Instance.launched_at < end)
- if project_id:
- query = query.filter_by(project_id=project_id)
- if host:
- query = query.filter_by(host=host)
-
- return query.all()
-
-
@require_admin_context
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
@@ -1734,7 +1686,8 @@ def _instance_get_all_query(context, project_only=False):
options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
options(joinedload('metadata')).\
- options(joinedload('instance_type'))
+ options(joinedload('instance_type')).\
+ options(joinedload('system_metadata'))
@require_admin_context
@@ -1846,9 +1799,10 @@ def instance_update_and_get_original(context, instance_uuid, values):
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
-def _instance_metadata_update_in_place(context, instance, metadata, session):
+def _instance_metadata_update_in_place(context, instance, metadata_type, model,
+ metadata, session):
to_delete = []
- for keyvalue in instance['metadata']:
+ for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
@@ -1856,15 +1810,14 @@ def _instance_metadata_update_in_place(context, instance, metadata, session):
to_delete.append(keyvalue)
for condemned in to_delete:
- instance['metadata'].remove(condemned)
condemned.soft_delete(session=session)
for key, value in metadata.iteritems():
- newitem = models.InstanceMetadata()
+ newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
session.add(newitem)
- instance['metadata'].append(newitem)
+ instance[metadata_type].append(newitem)
def _instance_update(context, instance_uuid, values, copy_old_instance=False):
@@ -1912,14 +1865,18 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
metadata = values.get('metadata')
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
+ 'metadata',
+ models.InstanceMetadata,
values.pop('metadata'),
session)
system_metadata = values.get('system_metadata')
if system_metadata is not None:
- instance_system_metadata_update(
- context, instance_ref['uuid'], values.pop('system_metadata'),
- delete=True, session=session)
+ _instance_metadata_update_in_place(context, instance_ref,
+ 'system_metadata',
+ models.InstanceSystemMetadata,
+ values.pop('system_metadata'),
+ session)
instance_ref.update(values)
instance_ref.save(session=session)
@@ -2247,7 +2204,9 @@ def network_get_associated_fixed_ips(context, network_id, host=None):
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
- models.Instance.created_at).\
+ models.Instance.created_at,
+ models.FixedIp.allocated,
+ models.FixedIp.leased).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
filter(models.FixedIp.allocated == True).\
@@ -2269,6 +2228,8 @@ def network_get_associated_fixed_ips(context, network_id, host=None):
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
+ cleaned['allocated'] = datum[8]
+ cleaned['leased'] = datum[9]
data.append(cleaned)
return data
@@ -3559,7 +3520,7 @@ def instance_type_create(context, values):
instance_type_ref.update(values)
instance_type_ref.save(session=session)
except Exception, e:
- raise exception.DBError(e)
+ raise db_session.DBError(e)
return _dict_with_extra_specs(instance_type_ref)
@@ -4262,7 +4223,7 @@ def s3_image_create(context, image_uuid):
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save()
except Exception, e:
- raise exception.DBError(e)
+ raise db_session.DBError(e)
return s3_image_ref
@@ -4590,6 +4551,7 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
def action_start(context, values):
+ convert_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save()
@@ -4597,6 +4559,7 @@ def action_start(context, values):
def action_finish(context, values):
+ convert_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action_ref = model_query(context, models.InstanceAction,
@@ -4644,6 +4607,7 @@ def _action_get_by_request_id(context, instance_uuid, request_id,
def action_event_start(context, values):
"""Start an event on an instance action."""
+ convert_datetimes(values, 'start_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
@@ -4664,6 +4628,7 @@ def action_event_start(context, values):
def action_event_finish(context, values):
"""Finish an event on an instance action."""
+ convert_datetimes(values, 'start_time', 'finish_time')
session = get_session()
with session.begin():
action = _action_get_by_request_id(context, values['instance_uuid'],
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
index a20799fbe..d93cd1ead 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
@@ -37,9 +37,9 @@ def upgrade(migrate_engine):
if rec['binary'] != 'nova-compute':
continue
# if zone doesn't exist create
- result = aggregate_metadata.select().where(aggregate_metadata.c.key ==
- 'availability_zone' and
- aggregate_metadata.c.key == rec['availability_zone']).execute()
+ result = aggregate_metadata.select().where(
+ aggregate_metadata.c.key == 'availability_zone').where(
+ aggregate_metadata.c.value == rec['availability_zone']).execute()
result = [r for r in result]
if len(result) > 0:
agg_id = result[0].aggregate_id
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
index d4bd991f7..c49e8272b 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/152_change_type_of_deleted_column.py
@@ -4,8 +4,7 @@ from sqlalchemy.ext.compiler import compiles
from sqlalchemy import MetaData, Table, Column, Index
from sqlalchemy import select
from sqlalchemy.sql.expression import UpdateBase
-from sqlalchemy.sql import literal_column
-from sqlalchemy import String, Integer, Boolean
+from sqlalchemy import Integer, Boolean
from sqlalchemy.types import NullType, BigInteger
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py
new file mode 100644
index 000000000..20e75a6eb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py
@@ -0,0 +1,49 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, select, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True)
+ instance_types = Table('instance_types', meta, autoload=True)
+ sys_meta = Table('instance_system_metadata', meta, autoload=True)
+
+ # Taken from nova/compute/api.py
+ instance_type_props = ['id', 'name', 'memory_mb', 'vcpus',
+ 'root_gb', 'ephemeral_gb', 'flavorid',
+ 'swap', 'rxtx_factor', 'vcpu_weight']
+
+ select_columns = [instances.c.uuid]
+ select_columns += [getattr(instance_types.c, name)
+ for name in instance_type_props]
+
+ q = select(select_columns, from_obj=instances.join(
+ instance_types,
+ instances.c.instance_type_id == instance_types.c.id))
+
+ i = sys_meta.insert()
+ for values in q.execute():
+ for index in range(0, len(instance_type_props)):
+ i.execute({"key": "instance_type_%s" % instance_type_props[index],
+ "value": str(values[index + 1]),
+ "instance_uuid": values[0]})
+
+
+def downgrade(migrate_engine):
+ # This migration only touches data, and only metadata at that. No need
+ # to go through and delete old metadata items.
+ pass
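For one instance the upgrade inserts a row per property; illustratively (made-up values):

    # Illustrative rows in instance_system_metadata after the upgrade:
    #   instance_uuid  key                       value
    #   <uuid>         instance_type_id          '5'
    #   <uuid>         instance_type_name        'm1.small'
    #   <uuid>         instance_type_memory_mb   '2048'
    # ... one row for each name in instance_type_props, str()-converted.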
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index dbc1ed432..421167bec 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -20,8 +20,8 @@ import distutils.version as dist_version
import os
from nova.db import migration
-from nova.db.sqlalchemy.session import get_engine
from nova import exception
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import log as logging
@@ -62,6 +62,8 @@ from migrate.versioning.repository import Repository
_REPOSITORY = None
+get_engine = db_session.get_engine
+
def db_sync(version=None):
if version is not None:
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 05452f2ad..5eeae30dc 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -26,9 +26,9 @@ from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper
-from nova.db.sqlalchemy.session import get_session
from nova.db.sqlalchemy import types
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common import timeutils
@@ -36,74 +36,8 @@ CONF = cfg.CONF
BASE = declarative_base()
-class NovaBase(object):
- """Base class for Nova Models."""
- __table_initialized__ = False
- created_at = Column(DateTime, default=timeutils.utcnow)
- updated_at = Column(DateTime, onupdate=timeutils.utcnow)
- deleted_at = Column(DateTime)
- deleted = Column(Integer, default=0)
- metadata = None
-
- def save(self, session=None):
- """Save this object."""
- if not session:
- session = get_session()
- # NOTE(boris-42): This part of code should be look like:
- # sesssion.add(self)
- # session.flush()
- # But there is a bug in sqlalchemy and eventlet that
- # raises NoneType exception if there is no running
- # transaction and rollback is called. As long as
- # sqlalchemy has this bug we have to create transaction
- # explicity.
- with session.begin(subtransactions=True):
- session.add(self)
- session.flush()
-
- def soft_delete(self, session=None):
- """Mark this object as deleted."""
- self.deleted = self.id
- self.deleted_at = timeutils.utcnow()
- self.save(session=session)
-
- def __setitem__(self, key, value):
- setattr(self, key, value)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def get(self, key, default=None):
- return getattr(self, key, default)
-
- def __iter__(self):
- columns = dict(object_mapper(self).columns).keys()
- # NOTE(russellb): Allow models to specify other keys that can be looked
- # up, beyond the actual db columns. An example would be the 'name'
- # property for an Instance.
- if hasattr(self, '_extra_keys'):
- columns.extend(self._extra_keys())
- self._i = iter(columns)
- return self
-
- def next(self):
- n = self._i.next()
- return n, getattr(self, n)
-
- def update(self, values):
- """Make the model object behave like a dict."""
- for k, v in values.iteritems():
- setattr(self, k, v)
-
- def iteritems(self):
- """Make the model object behave like a dict.
-
- Includes attributes from joins."""
- local = dict(self)
- joined = dict([(k, v) for k, v in self.__dict__.iteritems()
- if not k[0] == '_'])
- local.update(joined)
- return local.iteritems()
+class NovaBase(models.SoftDeleteMixin, models.ModelBase):
+ pass
class Service(BASE, NovaBase):
@@ -719,6 +653,19 @@ class FixedIp(BASE, NovaBase):
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
host = Column(String(255))
+ network = relationship(Network,
+ backref=backref('fixed_ips'),
+ foreign_keys=network_id,
+ primaryjoin='and_('
+ 'FixedIp.network_id == Network.id,'
+ 'FixedIp.deleted == 0,'
+ 'Network.deleted == 0)')
+ instance = relationship(Instance,
+ foreign_keys=instance_uuid,
+ primaryjoin='and_('
+ 'FixedIp.instance_uuid == Instance.uuid,'
+ 'FixedIp.deleted == 0,'
+ 'Instance.deleted == 0)')
class FloatingIp(BASE, NovaBase):
@@ -732,6 +679,13 @@ class FloatingIp(BASE, NovaBase):
auto_assigned = Column(Boolean, default=False, nullable=False)
pool = Column(String(255))
interface = Column(String(255))
+ fixed_ip = relationship(FixedIp,
+ backref=backref('floating_ips'),
+ foreign_keys=fixed_ip_id,
+ primaryjoin='and_('
+ 'FloatingIp.fixed_ip_id == FixedIp.id,'
+ 'FloatingIp.deleted == 0,'
+ 'FixedIp.deleted == 0)')
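These relationships are what make the joinedload('network') and joinedload_all('fixed_ip.instance') options added in sqlalchemy/api.py work, and their primaryjoin clauses keep soft-deleted rows (deleted != 0) out of the join. A sketch of the resulting traversal (hypothetical variables):

    # Sketch: floating_ip_get() now eager-loads fixed_ip and instance,
    # so this attribute chain issues no additional queries.
    fip = floating_ip_get(ctxt, fip_id)
    if fip.fixed_ip and fip.fixed_ip.instance:
        print fip.fixed_ip.instance.hostname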
class DNSDomain(BASE, NovaBase):
@@ -999,7 +953,11 @@ class InstanceFault(BASE, NovaBase):
class InstanceAction(BASE, NovaBase):
- """Track client actions on an instance."""
+ """Track client actions on an instance.
+
+ The intention is that there will only be one of these per user request. A
+ lookup by (instance_uuid, request_id) should always return a single result.
+ """
__tablename__ = 'instance_actions'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
action = Column(String(255))
diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py
new file mode 100644
index 000000000..2faa5021f
--- /dev/null
+++ b/nova/db/sqlalchemy/utils.py
@@ -0,0 +1,117 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy.engine import reflection
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy import MetaData, Table, Column, Index
+from sqlalchemy.sql.expression import UpdateBase
+from sqlalchemy.types import NullType
+
+from nova import exception
+
+
+class InsertFromSelect(UpdateBase):
+ def __init__(self, table, select):
+ self.table = table
+ self.select = select
+
+
+@compiles(InsertFromSelect)
+def visit_insert_from_select(element, compiler, **kw):
+ return "INSERT INTO %s %s" % (
+ compiler.process(element.table, asfrom=True),
+ compiler.process(element.select))
+
+
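InsertFromSelect compiles to a server-side INSERT INTO ... SELECT, which is how _drop_unique_constraint_in_sqlite below copies a table without pulling rows into Python. A standalone sketch (old_table and new_table are assumed Table objects):

    # Sketch: copy every row of old_table into new_table in one statement.
    ins = InsertFromSelect(new_table, old_table.select())
    migrate_engine.execute(ins)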
+def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
+ **col_name_col_instance):
+ insp = reflection.Inspector.from_engine(migrate_engine)
+ meta = MetaData(bind=migrate_engine)
+
+ table = Table(table_name, meta, autoload=True)
+ columns = []
+ for column in table.columns:
+ if isinstance(column.type, NullType):
+ new_column = col_name_col_instance.get(column.name)
+ if new_column is None:
+ msg = _("Please specify column %s in col_name_col_instance "
+ "param. It is required because the column has a type "
+ "unsupported by sqlite.")
+ raise exception.NovaException(msg % column.name)
+
+ if not isinstance(new_column, Column):
+ msg = _("col_name_col_instance param has wrong type of "
+ "column instance for column %s It should be instance "
+ "of sqlalchemy.Column.")
+ raise exception.NovaException(msg % column.name)
+ columns.append(new_column)
+ else:
+ columns.append(column.copy())
+
+ constraints = [constraint for constraint in table.constraints
+ if not constraint.name == uc_name]
+
+ new_table = Table(table_name + "__tmp__", meta, *(columns + constraints))
+ new_table.create()
+
+ indexes = []
+ for index in insp.get_indexes(table_name):
+ column_names = [new_table.c[c] for c in index['column_names']]
+ indexes.append(Index(index["name"],
+ *column_names,
+ unique=index["unique"]))
+
+ ins = InsertFromSelect(new_table, table.select())
+ migrate_engine.execute(ins)
+ table.drop()
+
+ for index in indexes:
+ index.create(migrate_engine)
+ new_table.rename(table_name)
+
+
+def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
+ **col_name_col_instance):
+ """
+ This method drops a unique constraint from a table and works for mysql,
+ postgresql and sqlite. In mysql and postgresql we are able to use the
+ "alter table" construction. In sqlite there is only one way to drop a
+ unique constraint:
+ 1) Create a new table with the same columns, indexes and constraints
+ (except the one that we want to drop).
+ 2) Copy data from the old table to the new one.
+ 3) Drop the old table.
+ 4) Rename the new table to the name of the old table.
+
+ :param migrate_engine: sqlalchemy engine
+ :param table_name: name of the table that contains the unique constraint.
+ :param uc_name: name of the unique constraint that will be dropped.
+ :param columns: columns that are in the unique constraint.
+ :param col_name_col_instance: contains pairs column_name=column_instance.
+ column_instance is an instance of Column. These params
+ are required only for columns that have types
+ unsupported by sqlite, for example BigInteger.
+ """
+ if migrate_engine.name in ["mysql", "postgresql"]:
+ meta = MetaData()
+ meta.bind = migrate_engine
+ t = Table(table_name, meta, autoload=True)
+ uc = UniqueConstraint(*fields, table=t, name=uc_name)
+ uc.drop()
+ else:
+ _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
+ **col_name_col_instance)
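
For illustration, a minimal sketch of how a migration script might call the helper above. The table name, constraint name, and column names are hypothetical, and the import path assumes the helper lives in nova/db/sqlalchemy/utils.py:

    from sqlalchemy import BigInteger, Column

    from nova.db.sqlalchemy import utils


    def upgrade(migrate_engine):
        # On mysql/postgresql this issues a single ALTER TABLE; on sqlite it
        # rebuilds the table (create a __tmp__ copy, move the rows over with
        # InsertFromSelect, drop the old table, rename), as documented above.
        utils.drop_unique_constraint(
            migrate_engine, 'instance_types', 'uniq_name_x_deleted',
            'name', 'deleted',
            # col_name_col_instance entries are only needed for columns whose
            # types sqlite reflects as NullType, e.g. BigInteger:
            deleted=Column('deleted', BigInteger))
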
diff --git a/nova/exception.py b/nova/exception.py
index a9a565393..ede512a97 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -25,7 +25,6 @@ SHOULD include dedicated exception logging.
"""
import functools
-import itertools
import webob.exc
@@ -165,20 +164,6 @@ class EC2APIError(NovaException):
        super(EC2APIError, self).__init__(outstr)

-class DBError(NovaException):
-    """Wraps an implementation specific exception."""
-    def __init__(self, inner_exception=None):
-        self.inner_exception = inner_exception
-        super(DBError, self).__init__(str(inner_exception))
-
-
-class DBDuplicateEntry(DBError):
-    """Wraps an implementation specific exception."""
-    def __init__(self, columns=[], inner_exception=None):
-        self.columns = columns
-        super(DBDuplicateEntry, self).__init__(inner_exception)
-
-
class EncryptionFailure(NovaException):
    message = _("Failed to encrypt text: %(reason)s")

@@ -227,6 +212,20 @@ class Invalid(NovaException):
    code = 400

+class InvalidBDM(Invalid):
+    message = _("Block Device Mapping is Invalid.")
+
+
+class InvalidBDMSnapshot(InvalidBDM):
+    message = _("Block Device Mapping is Invalid: "
+                "failed to get snapshot %(id)s.")
+
+
+class InvalidBDMVolume(InvalidBDM):
+    message = _("Block Device Mapping is Invalid: "
+                "failed to get volume %(id)s.")
+
+
class VolumeUnattached(Invalid):
    message = _("Volume %(volume_id)s is not attached to anything")
@@ -1107,3 +1106,12 @@ class CryptoCAFileNotFound(FileNotFound):
class CryptoCRLFileNotFound(FileNotFound):
    message = _("The CRL file for %(project)s could not be found")
+
+
+class InstanceRecreateNotSupported(Invalid):
+    message = _('Instance recreate is not implemented by this virt driver.')
+
+
+class ServiceGroupUnavailable(NovaException):
+    message = _("The service from servicegroup driver %(driver)s is "
+                "temporarily unavailable.")
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 1a6bba62f..78cfc3dee 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -258,8 +258,8 @@ class GlanceImageService(object):
        return getattr(image_meta, 'direct_url', None)

-    def download(self, context, image_id, data):
-        """Calls out to Glance for metadata and data and writes data."""
+    def download(self, context, image_id, data=None):
+        """Calls out to Glance for data and writes data, or returns it."""
        if 'file' in CONF.allowed_direct_url_schemes:
            location = self.get_location(context, image_id)
            o = urlparse.urlparse(location)
@@ -277,8 +277,11 @@ class GlanceImageService(object):
        except Exception:
            _reraise_translated_image_exception(image_id)
-        for chunk in image_chunks:
-            data.write(chunk)
+        if data is None:
+            return image_chunks
+        else:
+            for chunk in image_chunks:
+                data.write(chunk)

    def create(self, context, image_meta, data=None):
        """Store the image data and return the new image object."""
@@ -485,6 +488,8 @@ def get_remote_image_service(context, image_href):
    :returns: a tuple of the form (image_service, image_id)
    """
+    # Calling out to another service may take a while, so let's log this
+    LOG.debug(_("fetching image %s from glance") % image_href)
    #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
    # standalone image ID
    if '/' not in str(image_href):
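
With data now optional, download() supports two call styles: write the chunks into a caller-supplied file object, or get the chunk iterable back and stream it. A hedged sketch; fetch_image and its arguments are placeholders, not part of the diff:

    import io


    def fetch_image(image_service, context, image_id, fileobj=None):
        # Old style: pass a writable object and download() writes each chunk.
        if fileobj is not None:
            image_service.download(context, image_id, fileobj)
            return None
        # New style: omit data and download() returns the chunk iterable,
        # so the caller can stream it instead of going through a file.
        buf = io.BytesIO()
        for chunk in image_service.download(context, image_id):
            buf.write(chunk)
        return buf.getvalue()
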
diff --git a/nova/locale/en_US/LC_MESSAGES/nova.po b/nova/locale/en_US/LC_MESSAGES/nova.po
index 7da810ba5..74873ef96 100644
--- a/nova/locale/en_US/LC_MESSAGES/nova.po
+++ b/nova/locale/en_US/LC_MESSAGES/nova.po
@@ -8,8 +8,8 @@ msgstr ""
"Project-Id-Version: Nova\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/nova\n"
"POT-Creation-Date: 2012-10-29 00:01+0000\n"
-"PO-Revision-Date: 2012-09-13 10:30+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"PO-Revision-Date: 2013-01-21 18:28+0000\n"
+"Last-Translator: Jeremy Stanley <fungi@yuggoth.org>\n"
"Language-Team: en_US <LL@li.org>\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
"MIME-Version: 1.0\n"
@@ -137,9 +137,9 @@ msgid "Policy doesn't allow %(action)s to be performed."
msgstr "Policy doesn't allow %(action)s to be performed."
#: nova/exception.py:214
-#, fuzzy, python-format
+#, python-format
msgid "Image %(image_id)s is not active."
-msgstr "Image %(image_id)s could not be found."
+msgstr "Image %(image_id)s is not active."
#: nova/exception.py:218
#, python-format
@@ -195,9 +195,8 @@ msgid "Invalid metadata"
msgstr "Invalid metadata"
#: nova/exception.py:267
-#, fuzzy
msgid "Invalid metadata size"
-msgstr "Invalid metadata key"
+msgstr "Invalid metadata size"
#: nova/exception.py:271
#, python-format
@@ -398,9 +397,9 @@ msgid "Resource could not be found."
msgstr "Resource could not be found."
#: nova/exception.py:438
-#, fuzzy, python-format
+#, python-format
msgid "Could not find driver for compute_driver %(name)s"
-msgstr "Could not find driver for connection_type %(name)s"
+msgstr "Could not find driver for compute_driver %(name)s"
#: nova/exception.py:442
#, python-format
@@ -624,7 +623,7 @@ msgstr ""
#: nova/exception.py:614
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
-msgstr ""
+msgstr "More than one instance is associated with fixed ip address '%(address)s'."
#: nova/exception.py:619
#, python-format
@@ -665,14 +664,13 @@ msgid "Floating ip not found for host %(host)s."
msgstr "Floating ip not found for host %(host)s."
#: nova/exception.py:656
-#, fuzzy, python-format
+#, python-format
msgid "Multiple floating ips are found for address %(address)s."
-msgstr "Floating ip not found for address %(address)s."
+msgstr "Multiple floating ips are found for address %(address)s."
#: nova/exception.py:660
-#, fuzzy
msgid "Floating ip pool not found."
-msgstr "No floating_ip found"
+msgstr "Floating ip pool not found."
#: nova/exception.py:665
msgid "Zero floating ips available."
@@ -699,7 +697,7 @@ msgstr "Interface %(interface)s not found."
#: nova/exception.py:686
msgid "Cannot disassociate auto assigined floating ip"
-msgstr ""
+msgstr "Cannot disassociate auto assigined floating ip"
#: nova/exception.py:690
#, python-format
@@ -965,14 +963,14 @@ msgid "Instance %(name)s already exists."
msgstr "Instance %(name)s already exists."
#: nova/exception.py:897
-#, fuzzy, python-format
+#, python-format
msgid "Instance Type with name %(name)s already exists."
-msgstr "Instance Type %(name)s already exists."
+msgstr "Instance Type with name %(name)s already exists."
#: nova/exception.py:901
-#, fuzzy, python-format
+#, python-format
msgid "Instance Type with ID %(flavor_id)s already exists."
-msgstr "Instance Type %(name)s already exists."
+msgstr "Instance Type with ID %(flavor_id)s already exists."
#: nova/exception.py:905
#, python-format
@@ -1194,9 +1192,9 @@ msgid "Instance %(instance_id)s could not be found."
msgstr "Instance %(instance_id)s could not be found."
#: nova/exception.py:1098
-#, fuzzy, python-format
+#, python-format
msgid "Marker %(marker)s could not be found."
-msgstr "Image %(image_id)s could not be found."
+msgstr "Marker %(marker)s could not be found."
#: nova/exception.py:1102
#, python-format
@@ -1811,7 +1809,7 @@ msgstr "Floating ip is not associated."
#: nova/api/ec2/cloud.py:1201
#: nova/api/openstack/compute/contrib/floating_ips.py:100
msgid "Cannot disassociate auto assigned floating ip"
-msgstr ""
+msgstr "Cannot disassociate auto assigned floating ip"
#: nova/api/ec2/cloud.py:1228
msgid "Image must be available"
@@ -2101,9 +2099,8 @@ msgid "Empty body provided in request"
msgstr "Empty body provided in request"
#: nova/api/openstack/wsgi.py:897
-#, fuzzy
msgid "Invalid XML in request body"
-msgstr "Invalid request body"
+msgstr "Invalid XML in request body"
#: nova/api/openstack/wsgi.py:916
#, python-format
@@ -2649,9 +2646,9 @@ msgstr "Floating ip is not associated"
#: nova/api/openstack/compute/contrib/floating_ips.py:151
#: nova/api/openstack/compute/contrib/floating_ips.py:199
-#, fuzzy, python-format
+#, python-format
msgid "Floating ip not found for id %s"
-msgstr "Floating ip not found for id %(id)s."
+msgstr "Floating ip not found for id %s"
#: nova/api/openstack/compute/contrib/floating_ips.py:184
#, python-format
@@ -2691,9 +2688,8 @@ msgstr "l3driver call to add floating ip failed"
#: nova/api/openstack/compute/contrib/floating_ips.py:272
#: nova/api/openstack/compute/contrib/floating_ips.py:301
-#, fuzzy
msgid "floating ip not found"
-msgstr "No floating_ip found"
+msgstr "floating ip not found"
#: nova/api/openstack/compute/contrib/floating_ips.py:275
msgid "Error. Unable to associate floating ip"
@@ -2907,18 +2903,16 @@ msgid "Instance has had its instance_type removed from the DB"
msgstr "Instance has had its instance_type removed from the DB"
#: nova/api/openstack/volume/volumes.py:277
-#, fuzzy
msgid "Invalid request body. 'volume' not found"
-msgstr "Invalid request body"
+msgstr "Invalid request body. 'volume' not found"
#: nova/api/openstack/volume/volumes.py:307
-#, fuzzy
msgid "Invalid request body. 'size' not found"
-msgstr "Invalid request body"
+msgstr "Invalid request body. 'size' not found"
#: nova/api/openstack/volume/volumes.py:317
msgid "Snapshot and image cannot be specified together."
-msgstr ""
+msgstr "Snapshot and image cannot be specified together."
#: nova/api/openstack/volume/volumes.py:361
#, python-format
@@ -2928,11 +2922,11 @@ msgstr "Removing options '%(bad_options)s' from query"
#: nova/api/openstack/volume/contrib/admin_actions.py:72
#, python-format
msgid "Updating status of %(resource)s '%(id)s' to '%(status)s'"
-msgstr ""
+msgstr "Updating status of %(resource)s '%(id)s' to '%(status)s'"
#: nova/api/openstack/volume/contrib/volume_actions.py:90
msgid "No image_name was specified in request."
-msgstr ""
+msgstr "No image_name was specified in request."
#: nova/cloudpipe/pipelib.py:44
msgid "Instance type for vpn instances"
@@ -3031,9 +3025,8 @@ msgid "No host for instance, deleting immediately"
msgstr "No host for instance, deleting immediately"
#: nova/compute/api.py:929
-#, fuzzy
msgid "host for instance is down, deleting from database"
-msgstr "No host for instance, deleting immediately"
+msgstr "host for instance is down, deleting from database"
#: nova/compute/api.py:950
msgid "Going to try to terminate instance"
@@ -3200,7 +3193,7 @@ msgstr "Possibly task preempted."
#: nova/compute/manager.py:230
msgid "Compute driver option required, but not specified"
-msgstr ""
+msgstr "Compute driver option required, but not specified"
#: nova/compute/manager.py:233
#, python-format
@@ -3243,9 +3236,8 @@ msgid "Setting up bdm %s"
msgstr "Setting up bdm %s"
#: nova/compute/manager.py:518
-#, fuzzy
msgid "Failed to dealloc network for deleted instance"
-msgstr "Deallocating network for instance"
+msgstr "Failed to dealloc network for deleted instance"
#: nova/compute/manager.py:550
#, python-format
@@ -3343,7 +3335,7 @@ msgstr "terminating bdm %s"
#: nova/compute/manager.py:927
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
-msgstr ""
+msgstr "Ignoring volume cleanup failure due to %s"
#: nova/compute/manager.py:967 nova/compute/manager.py:1808
#: nova/compute/manager.py:2993
@@ -3461,7 +3453,7 @@ msgstr "Migrating"
#: nova/compute/manager.py:1805
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
-msgstr ""
+msgstr "Failed to rollback quota for failed finish_resize: %(qr_error)s"
#: nova/compute/manager.py:1861
msgid "Pausing"
@@ -3732,31 +3724,35 @@ msgid ""
"Host field should be not be set on the instance until resources have been"
" claimed."
msgstr ""
+"Host field should be not be set on the instance until resources have been"
+" claimed."
#: nova/compute/resource_tracker.py:168
-#, fuzzy, python-format
+#, python-format
msgid ""
"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
"%(vcpus)d"
msgstr ""
-"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, mem limit"
-" %(memory_mb_limit)s"
+"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
+"%(vcpus)d"
#: nova/compute/resource_tracker.py:210
-#, fuzzy, python-format
+#, python-format
msgid ""
"Total memory: %(total_mem)d MB, used: %(used_mem)d MB, free: %(free_mem)d"
" MB"
-msgstr "Total memory: %(total_mem)d MB, used: %(used_mem)d MB, free: %(free_mem)d"
+msgstr ""
+"Total memory: %(total_mem)d MB, used: %(used_mem)d MB, free: %(free_mem)d"
+" MB"
#: nova/compute/resource_tracker.py:219
msgid "Memory limit not specified, defaulting to unlimited"
-msgstr ""
+msgstr "Memory limit not specified, defaulting to unlimited"
#: nova/compute/resource_tracker.py:225
-#, fuzzy, python-format
+#, python-format
msgid "Memory limit: %(memory_mb_limit)d MB, free: %(free_ram_mb)d MB"
-msgstr "Limit: %(memory_mb_limit)d MB, free: %(free_ram_mb)d"
+msgstr "Memory limit: %(memory_mb_limit)d MB, free: %(free_ram_mb)d MB"
#: nova/compute/resource_tracker.py:232
#, python-format
@@ -3768,20 +3764,22 @@ msgstr ""
"memory %(memory_mb)d MB"
#: nova/compute/resource_tracker.py:241
-#, fuzzy, python-format
+#, python-format
msgid ""
"Total disk: %(total_disk)d GB, used: %(used_disk)d GB, free: "
"%(free_disk)d GB"
-msgstr "Total memory: %(total_mem)d MB, used: %(used_mem)d MB, free: %(free_mem)d"
+msgstr ""
+"Total disk: %(total_disk)d GB, used: %(used_disk)d GB, free: "
+"%(free_disk)d GB"
#: nova/compute/resource_tracker.py:250
msgid "Disk limit not specified, defaulting to unlimited"
-msgstr ""
+msgstr "Disk limit not specified, defaulting to unlimited"
#: nova/compute/resource_tracker.py:256
-#, fuzzy, python-format
+#, python-format
msgid "Disk limit: %(disk_gb_limit)d GB, free: %(free_disk_gb)d GB"
-msgstr "Limit: %(memory_mb_limit)d MB, free: %(free_ram_mb)d"
+msgstr "Disk limit: %(disk_gb_limit)d GB, free: %(free_disk_gb)d GB"
#: nova/compute/resource_tracker.py:262
#, python-format
@@ -3795,25 +3793,25 @@ msgstr ""
#: nova/compute/resource_tracker.py:273
#, python-format
msgid "Total VCPUs: %(total_vcpus)d, used: %(used_vcpus)d"
-msgstr ""
+msgstr "Total VCPUs: %(total_vcpus)d, used: %(used_vcpus)d"
#: nova/compute/resource_tracker.py:280
msgid "VCPU limit not specified, defaulting to unlimited"
-msgstr ""
+msgstr "VCPU limit not specified, defaulting to unlimited"
#: nova/compute/resource_tracker.py:284
#, python-format
msgid "CPU limit: %(vcpu_limit)d"
-msgstr ""
+msgstr "CPU limit: %(vcpu_limit)d"
#: nova/compute/resource_tracker.py:291
-#, fuzzy, python-format
+#, python-format
msgid ""
"Unable to claim resources. Free CPU %(free_vcpus)d < requested CPU "
"%(vcpus)d"
msgstr ""
-"Unable to claim resources. Free disk %(free_disk_gb)d GB < requested "
-"disk %(disk_gb)d GB"
+"Unable to claim resources. Free CPU %(free_vcpus)d < requested CPU "
+"%(vcpus)d"
#: nova/compute/resource_tracker.py:310
#, python-format
@@ -3851,40 +3849,40 @@ msgstr "No service record for host %s"
#: nova/compute/resource_tracker.py:435
#, python-format
msgid "Hypervisor: free ram (MB): %s"
-msgstr ""
+msgstr "Hypervisor: free ram (MB): %s"
#: nova/compute/resource_tracker.py:436
-#, fuzzy, python-format
+#, python-format
msgid "Hypervisor: free disk (GB): %s"
-msgstr "free_disk_gb: %s"
+msgstr "Hypervisor: free disk (GB): %s"
#: nova/compute/resource_tracker.py:441
#, python-format
msgid "Hypervisor: free VCPUs: %s"
-msgstr ""
+msgstr "Hypervisor: free VCPUs: %s"
#: nova/compute/resource_tracker.py:443
msgid "Hypervisor: VCPU information unavailable"
-msgstr ""
+msgstr "Hypervisor: VCPU information unavailable"
#: nova/compute/resource_tracker.py:450
-#, fuzzy, python-format
+#, python-format
msgid "Free ram (MB): %s"
-msgstr "free_ram_mb: %s"
+msgstr "Free ram (MB): %s"
#: nova/compute/resource_tracker.py:451
-#, fuzzy, python-format
+#, python-format
msgid "Free disk (GB): %s"
-msgstr "free_disk_gb: %s"
+msgstr "Free disk (GB): %s"
#: nova/compute/resource_tracker.py:456
-#, fuzzy, python-format
+#, python-format
msgid "Free VCPUS: %s"
-msgstr "free_disk_gb: %s"
+msgstr "Free VCPUS: %s"
#: nova/compute/resource_tracker.py:458
msgid "Free VCPU information unavailable"
-msgstr ""
+msgstr "Free VCPU information unavailable"
#: nova/compute/resource_tracker.py:533
#, python-format
@@ -4302,34 +4300,32 @@ msgid "Failed to update usages deallocating floating IP"
msgstr "Failed to update usages deallocating floating IP"
#: nova/network/manager.py:650
-#, fuzzy, python-format
+#, python-format
msgid "Starting migration network for instance %(instance_uuid)s"
-msgstr "Destroying VDIs for Instance %(instance_uuid)s"
+msgstr "Starting migration network for instance %(instance_uuid)s"
#: nova/network/manager.py:657
-#, fuzzy, python-format
+#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-"Fixed IP address %(address)s is already in use on instance "
-"%(instance_uuid)s."
+"Floating ip address |%(address)s| no longer belongs to instance "
+"%(instance_uuid)s. Will notmigrate it "
#: nova/network/manager.py:678
-#, fuzzy, python-format
+#, python-format
msgid "Finishing migration network for instance %(instance_uuid)s"
-msgstr ""
-"Automatically confirming migration %(migration_id)s for instance "
-"%(instance_uuid)s"
+msgstr "Finishing migration network for instance %(instance_uuid)s"
#: nova/network/manager.py:686
-#, fuzzy, python-format
+#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-"Fixed IP address %(address)s is already in use on instance "
-"%(instance_uuid)s."
+"Floating ip address |%(address)s| no longer belongs to instance "
+"%(instance_uuid)s. Will notsetup it."
#: nova/network/manager.py:733
#, python-format
@@ -4544,9 +4540,9 @@ msgid "validate_networks() for %s"
msgstr "validate_networks() for %s"
#: nova/network/quantumv2/api.py:438
-#, fuzzy, python-format
+#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
-msgstr "Floating ip not found for id %(id)s."
+msgstr "Multiple floating IP pools matches found for name '%s'"
#: nova/openstack/common/lockutils.py:98
#, python-format
@@ -4564,9 +4560,9 @@ msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
msgstr "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
#: nova/openstack/common/lockutils.py:216
-#, fuzzy, python-format
+#, python-format
msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
-msgstr "Got file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
#: nova/openstack/common/log.py:298
#, python-format
@@ -4574,9 +4570,9 @@ msgid "syslog facility must be one of: %s"
msgstr "syslog facility must be one of: %s"
#: nova/openstack/common/policy.py:394
-#, fuzzy, python-format
+#, python-format
msgid "Failed to understand rule %(rule)s"
-msgstr "Failed to understand rule %(match)r"
+msgstr "Failed to understand rule %(rule)s"
#: nova/openstack/common/policy.py:404
#, python-format
@@ -4584,9 +4580,9 @@ msgid "No handler for matches of kind %s"
msgstr "No handler for matches of kind %s"
#: nova/openstack/common/policy.py:679
-#, fuzzy, python-format
+#, python-format
msgid "Failed to understand rule %(rule)r"
-msgstr "Failed to understand rule %(match)r"
+msgstr "Failed to understand rule %(rule)r"
#: nova/openstack/common/notifier/api.py:125
#, python-format
@@ -4949,7 +4945,7 @@ msgstr "Could not find another compute"
#: nova/scheduler/driver.py:64
msgid "Exception during scheduler.run_instance"
-msgstr ""
+msgstr "Exception during scheduler.run_instance"
#: nova/scheduler/driver.py:68 nova/scheduler/manager.py:181
#, python-format
@@ -5064,9 +5060,9 @@ msgid "No service for compute ID %s"
msgstr "No service for compute ID %s"
#: nova/scheduler/manager.py:79
-#, fuzzy, python-format
+#, python-format
msgid "Failed to schedule create_volume: %(ex)s"
-msgstr "Failed to schedule_%(method)s: %(ex)s"
+msgstr "Failed to schedule create_volume: %(ex)s"
#: nova/scheduler/manager.py:165
#, python-format
@@ -5127,13 +5123,13 @@ msgid "VCPUs not set; assuming CPU collection broken"
msgstr "VCPUs not set; assuming CPU collection broken"
#: nova/scheduler/filters/disk_filter.py:47
-#, fuzzy, python-format
+#, python-format
msgid ""
"%(host_state)s does not have %(requested_disk)s MB usable disk, it only "
"has %(usable_disk_mb)s MB usable disk."
msgstr ""
-"%(host_state)s does not have %(requested_ram)s MB usable ram, it only has"
-" %(usable_ram)s MB usable ram."
+"%(host_state)s does not have %(requested_disk)s MB usable disk, it only "
+"has %(usable_disk_mb)s MB usable disk."
#: nova/scheduler/filters/image_props_filter.py:48
#, python-format
@@ -5173,6 +5169,8 @@ msgid ""
"%(host_state)s fails I/O ops check: Max IOs per host is set to "
"%(max_io_ops)s"
msgstr ""
+"%(host_state)s fails I/O ops check: Max IOs per host is set to "
+"%(max_io_ops)s"
#: nova/scheduler/filters/num_instances_filter.py:39
#, python-format
@@ -5180,6 +5178,8 @@ msgid ""
"%(host_state)s fails num_instances check: Max instances per host is set "
"to %(max_instances)s"
msgstr ""
+"%(host_state)s fails num_instances check: Max instances per host is set "
+"to %(max_instances)s"
#: nova/scheduler/filters/ram_filter.py:46
#, python-format
@@ -5499,11 +5499,13 @@ msgid ""
"Length mismatch: %(result)s\n"
"%(expected)s."
msgstr ""
+"Length mismatch: %(result)s\n"
+"%(expected)s."
#: nova/tests/integrated/test_api_samples.py:164
-#, fuzzy, python-format
+#, python-format
msgid "Result: %(res_obj)s not in %(expected)s."
-msgstr "Result: %(result)s is not a dict."
+msgstr "Result: %(res_obj)s not in %(expected)s."
#: nova/tests/integrated/test_api_samples.py:181
#: nova/tests/integrated/test_api_samples.py:194
@@ -5911,7 +5913,7 @@ msgstr "injected file path not valid"
#: nova/virt/disk/api.py:518
msgid "Not implemented on Windows"
-msgstr ""
+msgstr "Not implemented on Windows"
#: nova/virt/disk/api.py:552
#, python-format
@@ -5941,7 +5943,7 @@ msgstr "unknown guestmount error"
#: nova/virt/disk/guestfs.py:119
#, python-format
msgid "Failed to umount image at %s, guestmount was still running after 10s"
-msgstr ""
+msgstr "Failed to umount image at %s, guestmount was still running after 10s"
#: nova/virt/disk/loop.py:31
#, python-format
@@ -6620,9 +6622,8 @@ msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr "Failed to cleanup directory %(target)s: %(e)s"
#: nova/virt/libvirt/driver.py:729
-#, fuzzy
msgid "During detach_volume, instance disappeared."
-msgstr "During wait destroy, instance disappeared."
+msgstr "During detach_volume, instance disappeared."
#: nova/virt/libvirt/driver.py:739
msgid "attaching LXC block device"
@@ -7017,7 +7018,7 @@ msgstr "Verification complete"
#: nova/virt/libvirt/snapshots.py:83 nova/virt/libvirt/snapshots.py:86
#: nova/virt/libvirt/snapshots.py:89
msgid "LVM snapshots not implemented"
-msgstr ""
+msgstr "LVM snapshots not implemented"
#: nova/virt/libvirt/utils.py:131
#, python-format
@@ -7042,11 +7043,11 @@ msgstr ""
#: nova/virt/libvirt/utils.py:187
#, python-format
msgid "Path %s must be LVM logical volume"
-msgstr ""
+msgstr "Path %s must be LVM logical volume"
#: nova/virt/libvirt/utils.py:410
msgid "Can't retrieve root device path from instance libvirt configuration"
-msgstr ""
+msgstr "Can't retrieve root device path from instance libvirt configuration"
#: nova/virt/libvirt/utils.py:499
#, python-format
@@ -7145,7 +7146,7 @@ msgstr "No space left on any volume group"
#: nova/virt/powervm/exception.py:45
#, python-format
msgid "Operation '%(operation)s' on LPAR '%(instance_name)s' timed out"
-msgstr ""
+msgstr "Operation '%(operation)s' on LPAR '%(instance_name)s' timed out"
#: nova/virt/powervm/exception.py:50
msgid "Image creation failed on PowerVM"
@@ -7260,17 +7261,16 @@ msgid "Could not create logical volume. No space left on any volume group."
msgstr "Could not create logical volume. No space left on any volume group."
#: nova/virt/powervm/operator.py:554
-#, fuzzy
msgid "Unable to get checksum"
-msgstr "Unable to get console"
+msgstr "Unable to get checksum"
#: nova/virt/powervm/operator.py:557
msgid "Image checksums do not match"
-msgstr ""
+msgstr "Image checksums do not match"
#: nova/virt/powervm/operator.py:582
msgid "Uncompressed image file not found"
-msgstr ""
+msgstr "Uncompressed image file not found"
#: nova/virt/vmwareapi/driver.py:111
msgid ""
@@ -7891,9 +7891,9 @@ msgid "Unable to get SR for this host: %s"
msgstr "Unable to get SR for this host: %s"
#: nova/virt/xenapi/host.py:190
-#, fuzzy, python-format
+#, python-format
msgid "Failed to extract instance support from %s"
-msgstr "Failed to terminate instance"
+msgstr "Failed to extract instance support from %s"
#: nova/virt/xenapi/host.py:207
msgid "Unable to get updated status"
@@ -8048,9 +8048,8 @@ msgstr ""
" on %(sr_ref)s."
#: nova/virt/xenapi/vm_utils.py:454
-#, fuzzy
msgid "SR not present and could not be introduced"
-msgstr "Instance could not be found"
+msgstr "SR not present and could not be introduced"
#: nova/virt/xenapi/vm_utils.py:555
#, python-format
@@ -8110,9 +8109,9 @@ msgid "download_vhd failed: %r"
msgstr "download_vhd failed: %r"
#: nova/virt/xenapi/vm_utils.py:1028
-#, fuzzy, python-format
+#, python-format
msgid "Invalid value '%s' for xenapi_torrent_images"
-msgstr "Invalid value '%s' for force. "
+msgstr "Invalid value '%s' for xenapi_torrent_images"
#: nova/virt/xenapi/vm_utils.py:1039
#, python-format
@@ -8485,9 +8484,9 @@ msgid "Failed to transfer vhd to new host"
msgstr "Failed to transfer vhd to new host"
#: nova/virt/xenapi/vmops.py:655
-#, fuzzy, python-format
+#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
-msgstr "Resizing down VDI %(cow_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
+msgstr "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
#: nova/virt/xenapi/vmops.py:779
#, python-format
@@ -8700,14 +8699,14 @@ msgid "Unable to find vbd for vdi %s"
msgstr "Unable to find vbd for vdi %s"
#: nova/virt/xenapi/volume_utils.py:281
-#, fuzzy, python-format
+#, python-format
msgid "Unable to obtain target information %(mountpoint)s"
-msgstr "Unable to obtain target information %(data)s, %(mountpoint)s"
+msgstr "Unable to obtain target information %(mountpoint)s"
#: nova/virt/xenapi/volume_utils.py:309
-#, fuzzy, python-format
+#, python-format
msgid "Unable to obtain target information %(connection_data)s"
-msgstr "Unable to obtain target information %(data)s, %(mountpoint)s"
+msgstr "Unable to obtain target information %(connection_data)s"
#: nova/virt/xenapi/volume_utils.py:335
#, python-format
@@ -8863,12 +8862,11 @@ msgstr ""
#: nova/volume/api.py:145
msgid "Size of specified image is larger than volume size."
-msgstr ""
+msgstr "Size of specified image is larger than volume size."
#: nova/volume/api.py:215
-#, fuzzy
msgid "Failed to update quota for deleting volume."
-msgstr "Failed to update usages deleting volume"
+msgstr "Failed to update quota for deleting volume."
#: nova/volume/api.py:223
msgid "Volume status must be available or error"
@@ -8888,14 +8886,12 @@ msgid "Volume Snapshot status must be available or error"
msgstr "Volume Snapshot status must be available or error"
#: nova/volume/api.py:479
-#, fuzzy
msgid "Volume status must be available/in-use."
-msgstr "Volume status must be available or error"
+msgstr "Volume status must be available/in-use."
#: nova/volume/api.py:482
-#, fuzzy
msgid "Volume status is in-use."
-msgstr "volume %s: volume is busy"
+msgstr "Volume status is in-use."
#: nova/volume/cinder.py:68
#, python-format
@@ -8963,27 +8959,27 @@ msgstr "rbd has no pool %s"
#: nova/volume/driver.py:740
#, python-format
msgid "Image %s is not stored in rbd"
-msgstr ""
+msgstr "Image %s is not stored in rbd"
#: nova/volume/driver.py:744
#, python-format
msgid "Image %s has blank components"
-msgstr ""
+msgstr "Image %s has blank components"
#: nova/volume/driver.py:747
#, python-format
msgid "Image %s is not an rbd snapshot"
-msgstr ""
+msgstr "Image %s is not an rbd snapshot"
#: nova/volume/driver.py:762
#, python-format
msgid "%s is in a different ceph cluster"
-msgstr ""
+msgstr "%s is in a different ceph cluster"
#: nova/volume/driver.py:773
-#, fuzzy, python-format
+#, python-format
msgid "Unable to read image %s"
-msgstr "Unable to locate volume %s"
+msgstr "Unable to read image %s"
#: nova/volume/driver.py:815
#, python-format
@@ -9015,6 +9011,8 @@ msgid ""
"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure "
"your tgtd config file contains 'include %(volumes_dir)s/*'"
msgstr ""
+"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure "
+"your tgtd config file contains 'include %(volumes_dir)s/*'"
#: nova/volume/iscsi.py:154 nova/volume/iscsi.py:196
#, python-format
@@ -9042,12 +9040,12 @@ msgstr "volume %s: skipping export"
#: nova/volume/manager.py:109
msgid "Resuming any in progress delete operations"
-msgstr ""
+msgstr "Resuming any in progress delete operations"
#: nova/volume/manager.py:112
-#, fuzzy, python-format
+#, python-format
msgid "Resuming delete on volume: %s"
-msgstr "Removing volume: %s"
+msgstr "Resuming delete on volume: %s"
#: nova/volume/manager.py:121
#, python-format
@@ -9130,12 +9128,12 @@ msgstr "snapshot %s: deleted successfully"
#: nova/volume/manager.py:323
#, python-format
msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
-msgstr ""
+msgstr "Downloaded image %(image_id)s to %(volume_id)s successfully"
#: nova/volume/manager.py:342
-#, fuzzy, python-format
+#, python-format
msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
-msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
+msgstr "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
#: nova/volume/manager.py:426
msgid "Checking volume capabilities"
@@ -9636,7 +9634,7 @@ msgstr ""
#: nova/volume/storwize_svc.py:373
msgid "If compression is set to True, rsize must also be set (not equal to -1)"
-msgstr ""
+msgstr "If compression is set to True, rsize must also be set (not equal to -1)"
#: nova/volume/storwize_svc.py:378
msgid "enter: do_setup"
@@ -10194,7 +10192,7 @@ msgid "Failed to reach backend %d"
msgstr "Failed to reach backend %d"
#: nova/volume/xensm.py:102
-#, fuzzy, python-format
+#, python-format
msgid "XenSMDriver requires xenapi connection, using %s"
msgstr "XenSMDriver requires xenapi connection"
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 347b98733..567ba7de8 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova 2013.1\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2013-01-06 00:03+0000\n"
+"POT-Creation-Date: 2013-02-11 00:02+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -17,64 +17,69 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 0.9.6\n"
+#: nova/block_device.py:167 nova/virt/hyperv/basevolumeutils.py:96
+#, python-format
+msgid "block_device_list %s"
+msgstr ""
+
#: nova/context.py:62
#, python-format
msgid "Arguments dropped when creating context: %s"
msgstr ""
-#: nova/context.py:100
+#: nova/context.py:102
#, python-format
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr ""
-#: nova/crypto.py:46
+#: nova/crypto.py:47
msgid "Filename of root CA"
msgstr ""
-#: nova/crypto.py:49
+#: nova/crypto.py:50
msgid "Filename of private key"
msgstr ""
-#: nova/crypto.py:52
+#: nova/crypto.py:53
msgid "Filename of root Certificate Revocation List"
msgstr ""
-#: nova/crypto.py:55
+#: nova/crypto.py:56
msgid "Where we keep our keys"
msgstr ""
-#: nova/crypto.py:58
+#: nova/crypto.py:59
msgid "Where we keep our root CA"
msgstr ""
-#: nova/crypto.py:61
+#: nova/crypto.py:62
msgid "Should we use a CA for each project?"
msgstr ""
-#: nova/crypto.py:65
+#: nova/crypto.py:66
#, python-format
msgid "Subject for certificate for users, %s for project, user, timestamp"
msgstr ""
-#: nova/crypto.py:70
+#: nova/crypto.py:71
#, python-format
msgid "Subject for certificate for projects, %s for project, timestamp"
msgstr ""
-#: nova/crypto.py:300
+#: nova/crypto.py:332
msgid "Failed to write inbound.csr"
msgstr ""
-#: nova/crypto.py:303
+#: nova/crypto.py:335
#, python-format
msgid "Flags path: %s"
msgstr ""
-#: nova/exception.py:66
+#: nova/exception.py:65
msgid "Unexpected error while running command."
msgstr ""
-#: nova/exception.py:69
+#: nova/exception.py:68
#, python-format
msgid ""
"%(description)s\n"
@@ -84,240 +89,271 @@ msgid ""
"Stderr: %(stderr)r"
msgstr ""
-#: nova/exception.py:126
+#: nova/exception.py:123
msgid "An unknown exception occurred."
msgstr ""
-#: nova/exception.py:147 nova/openstack/common/rpc/common.py:47
+#: nova/exception.py:144 nova/openstack/common/rpc/common.py:89
msgid "Exception in string format operation"
msgstr ""
-#: nova/exception.py:161
+#: nova/exception.py:158
msgid "Unknown"
msgstr ""
-#: nova/exception.py:185
-msgid "Failed to decrypt text"
+#: nova/exception.py:168
+#, python-format
+msgid "Failed to encrypt text: %(reason)s"
+msgstr ""
+
+#: nova/exception.py:172
+#, python-format
+msgid "Failed to decrypt text: %(reason)s"
msgstr ""
-#: nova/exception.py:189
+#: nova/exception.py:176
msgid "Virtual Interface creation failed"
msgstr ""
-#: nova/exception.py:193
+#: nova/exception.py:180
msgid "5 attempts to create virtual interfacewith unique mac address failed"
msgstr ""
-#: nova/exception.py:198
+#: nova/exception.py:185
#, python-format
msgid "Connection to glance host %(host)s:%(port)s failed: %(reason)s"
msgstr ""
-#: nova/exception.py:203
+#: nova/exception.py:190
msgid "Not authorized."
msgstr ""
-#: nova/exception.py:208
+#: nova/exception.py:195
msgid "User does not have admin privileges"
msgstr ""
-#: nova/exception.py:212
+#: nova/exception.py:199
#, python-format
msgid "Policy doesn't allow %(action)s to be performed."
msgstr ""
-#: nova/exception.py:216
+#: nova/exception.py:203
#, python-format
msgid "Image %(image_id)s is not active."
msgstr ""
-#: nova/exception.py:220
+#: nova/exception.py:207
#, python-format
msgid "Not authorized for image %(image_id)s."
msgstr ""
-#: nova/exception.py:224
+#: nova/exception.py:211
msgid "Unacceptable parameters."
msgstr ""
-#: nova/exception.py:229
+#: nova/exception.py:216
+msgid "Block Device Mapping is Invalid."
+msgstr ""
+
+#: nova/exception.py:220
+#, python-format
+msgid "Block Device Mapping is Invalid: failed to get snapshot %(id)s."
+msgstr ""
+
+#: nova/exception.py:225
+#, python-format
+msgid "Block Device Mapping is Invalid: failed to get volume %(id)s."
+msgstr ""
+
+#: nova/exception.py:230
#, python-format
msgid "Volume %(volume_id)s is not attached to anything"
msgstr ""
-#: nova/exception.py:233 nova/api/ec2/cloud.py:447 nova/api/ec2/cloud.py:472
-#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2321
+#: nova/exception.py:234 nova/api/ec2/cloud.py:461
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2628
msgid "Keypair data is invalid"
msgstr ""
-#: nova/exception.py:237
+#: nova/exception.py:238
msgid "The request is invalid."
msgstr ""
-#: nova/exception.py:241
+#: nova/exception.py:242
msgid "Invalid input received"
msgstr ""
-#: nova/exception.py:245
+#: nova/exception.py:246
msgid "Invalid volume"
msgstr ""
-#: nova/exception.py:249 nova/api/openstack/compute/servers.py:1307
+#: nova/exception.py:250 nova/api/openstack/compute/servers.py:1338
#: nova/api/openstack/compute/contrib/admin_actions.py:242
msgid "Invalid metadata"
msgstr ""
-#: nova/exception.py:253
+#: nova/exception.py:254
msgid "Invalid metadata size"
msgstr ""
-#: nova/exception.py:257
+#: nova/exception.py:258
#, python-format
msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
msgstr ""
-#: nova/exception.py:261 nova/api/ec2/cloud.py:629
+#: nova/exception.py:262 nova/api/ec2/cloud.py:615
#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr ""
-#: nova/exception.py:265
+#: nova/exception.py:266
#, python-format
msgid "Invalid content type %(content_type)s."
msgstr ""
-#: nova/exception.py:269
+#: nova/exception.py:270
#, python-format
msgid "Invalid cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:273
+#: nova/exception.py:274 nova/openstack/common/db/sqlalchemy/session.py:353
msgid "Invalid Parameter: Unicode is not supported by the current database."
msgstr ""
-#: nova/exception.py:280
+#: nova/exception.py:281
#, python-format
msgid "%(err)s"
msgstr ""
-#: nova/exception.py:284
+#: nova/exception.py:285
#, python-format
msgid ""
"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason:"
" %(reason)s."
msgstr ""
-#: nova/exception.py:289
+#: nova/exception.py:290
#, python-format
msgid "Group not valid. Reason: %(reason)s"
msgstr ""
-#: nova/exception.py:293
+#: nova/exception.py:294 nova/openstack/common/db/sqlalchemy/utils.py:33
msgid "Sort key supplied was not valid."
msgstr ""
-#: nova/exception.py:297
+#: nova/exception.py:298
#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while"
" the instance is in this state."
msgstr ""
-#: nova/exception.py:302
+#: nova/exception.py:303
#, python-format
msgid "Instance %(instance_id)s is not running."
msgstr ""
-#: nova/exception.py:306
+#: nova/exception.py:307
#, python-format
msgid "Instance %(instance_id)s is not in rescue mode"
msgstr ""
-#: nova/exception.py:310
+#: nova/exception.py:311
#, python-format
msgid "Instance %(instance_id)s is not ready"
msgstr ""
-#: nova/exception.py:314
+#: nova/exception.py:315
msgid "Failed to suspend instance"
msgstr ""
-#: nova/exception.py:318
-msgid "Failed to resume server"
+#: nova/exception.py:319
+#, python-format
+msgid "Failed to resume instance: %(reason)s."
+msgstr ""
+
+#: nova/exception.py:323
+#, python-format
+msgid "Failed to power on instance: %(reason)s."
msgstr ""
-#: nova/exception.py:322
+#: nova/exception.py:327
+#, python-format
+msgid "Failed to power off instance: %(reason)s."
+msgstr ""
+
+#: nova/exception.py:331
msgid "Failed to reboot instance"
msgstr ""
-#: nova/exception.py:326
+#: nova/exception.py:335
msgid "Failed to terminate instance"
msgstr ""
-#: nova/exception.py:330
+#: nova/exception.py:339
msgid "Service is unavailable at this time."
msgstr ""
-#: nova/exception.py:334
+#: nova/exception.py:343
msgid "Insufficient compute resources."
msgstr ""
-#: nova/exception.py:338
+#: nova/exception.py:347
msgid "Compute service is unavailable at this time."
msgstr ""
-#: nova/exception.py:342
+#: nova/exception.py:351
#, python-format
msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr ""
-#: nova/exception.py:347
+#: nova/exception.py:356
msgid "The supplied hypervisor type of is invalid."
msgstr ""
-#: nova/exception.py:351
+#: nova/exception.py:360
msgid "The instance requires a newer hypervisor version than has been provided."
msgstr ""
-#: nova/exception.py:356
+#: nova/exception.py:365
#, python-format
msgid ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
"exist."
msgstr ""
-#: nova/exception.py:361
+#: nova/exception.py:370
#, python-format
msgid "The supplied device path (%(path)s) is invalid."
msgstr ""
-#: nova/exception.py:365
+#: nova/exception.py:374
#, python-format
msgid "The supplied device path (%(path)s) is in use."
msgstr ""
-#: nova/exception.py:369
+#: nova/exception.py:378
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr ""
-#: nova/exception.py:373
+#: nova/exception.py:382
msgid "Unacceptable CPU info"
msgstr ""
-#: nova/exception.py:377
+#: nova/exception.py:386
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr ""
-#: nova/exception.py:381
+#: nova/exception.py:390
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
msgstr ""
-#: nova/exception.py:387
+#: nova/exception.py:396
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -325,80 +361,85 @@ msgid ""
"one associated is %(actual)s."
msgstr ""
-#: nova/exception.py:394
+#: nova/exception.py:403
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr ""
-#: nova/exception.py:398
+#: nova/exception.py:407
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:402
+#: nova/exception.py:411
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:406
+#: nova/exception.py:415
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr ""
-#: nova/exception.py:410
+#: nova/exception.py:419
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr ""
-#: nova/exception.py:414
+#: nova/exception.py:423
+#, python-format
+msgid "Unexpected argument for periodic task creation: %(arg)s."
+msgstr ""
+
+#: nova/exception.py:427
msgid "Constraint not met."
msgstr ""
-#: nova/exception.py:419
+#: nova/exception.py:432
msgid "Resource could not be found."
msgstr ""
-#: nova/exception.py:424
+#: nova/exception.py:437
#, python-format
msgid "No agent-build associated with id %(id)s."
msgstr ""
-#: nova/exception.py:428
+#: nova/exception.py:441
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr ""
-#: nova/exception.py:432
+#: nova/exception.py:445
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr ""
-#: nova/exception.py:436
+#: nova/exception.py:449
#, python-format
msgid "No target id found for volume %(volume_id)s."
msgstr ""
-#: nova/exception.py:440
+#: nova/exception.py:453
#, python-format
msgid "No disk at %(location)s"
msgstr ""
-#: nova/exception.py:444
+#: nova/exception.py:457
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr ""
-#: nova/exception.py:448
+#: nova/exception.py:461
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: nova/exception.py:452
+#: nova/exception.py:465
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: nova/exception.py:456
+#: nova/exception.py:469
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -406,725 +447,751 @@ msgid ""
"image ids since adding this image?"
msgstr ""
-#: nova/exception.py:463
+#: nova/exception.py:476
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:467
+#: nova/exception.py:480
msgid "Cannot find SR to read/write VDI."
msgstr ""
-#: nova/exception.py:471
+#: nova/exception.py:484
#, python-format
msgid "Network %(network_id)s is duplicated."
msgstr ""
-#: nova/exception.py:475
+#: nova/exception.py:488
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr ""
-#: nova/exception.py:479
+#: nova/exception.py:492
#, python-format
msgid "%(req)s is required to create a network."
msgstr ""
-#: nova/exception.py:483
+#: nova/exception.py:496
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr ""
-#: nova/exception.py:487
+#: nova/exception.py:500
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr ""
-#: nova/exception.py:491
+#: nova/exception.py:504
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr ""
-#: nova/exception.py:495
+#: nova/exception.py:508
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:499
+#: nova/exception.py:512
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr ""
-#: nova/exception.py:503
+#: nova/exception.py:516
msgid "No networks defined."
msgstr ""
-#: nova/exception.py:507
+#: nova/exception.py:520
#, python-format
msgid ""
"Either Network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:512
+#: nova/exception.py:525
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
-#: nova/exception.py:516
+#: nova/exception.py:529
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr ""
-#: nova/exception.py:520
+#: nova/exception.py:533
#, python-format
msgid "Port %(port_id)s could not be found."
msgstr ""
-#: nova/exception.py:524
+#: nova/exception.py:537
+#, python-format
+msgid "Port %(port_id)s not usable for instance %(instance)s."
+msgstr ""
+
+#: nova/exception.py:541
+#, python-format
+msgid "No free port available for instance %(instance)s."
+msgstr ""
+
+#: nova/exception.py:545
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr ""
-#: nova/exception.py:528
+#: nova/exception.py:549
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:532
+#: nova/exception.py:553
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr ""
-#: nova/exception.py:536
+#: nova/exception.py:557
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr ""
-#: nova/exception.py:541
+#: nova/exception.py:562
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr ""
-#: nova/exception.py:545
+#: nova/exception.py:566
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr ""
-#: nova/exception.py:550
+#: nova/exception.py:571
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
msgstr ""
-#: nova/exception.py:555
+#: nova/exception.py:576
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr ""
-#: nova/exception.py:560
+#: nova/exception.py:581
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr ""
-#: nova/exception.py:564
+#: nova/exception.py:585
msgid "Zero fixed ips available."
msgstr ""
-#: nova/exception.py:568
+#: nova/exception.py:589
msgid "Zero fixed ips could be found."
msgstr ""
-#: nova/exception.py:577
+#: nova/exception.py:598
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:581
+#: nova/exception.py:602
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr ""
-#: nova/exception.py:585
+#: nova/exception.py:606
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr ""
-#: nova/exception.py:589
+#: nova/exception.py:610
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:593
+#: nova/exception.py:614
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr ""
-#: nova/exception.py:597
+#: nova/exception.py:618
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr ""
-#: nova/exception.py:601
+#: nova/exception.py:622
msgid "Floating ip pool not found."
msgstr ""
-#: nova/exception.py:606
+#: nova/exception.py:627
msgid "Zero floating ips available."
msgstr ""
-#: nova/exception.py:611
+#: nova/exception.py:632
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr ""
-#: nova/exception.py:615
+#: nova/exception.py:636
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr ""
-#: nova/exception.py:619
+#: nova/exception.py:640
msgid "Zero floating ips exist."
msgstr ""
-#: nova/exception.py:623
+#: nova/exception.py:644
#, python-format
msgid "Interface %(interface)s not found."
msgstr ""
-#: nova/exception.py:627
+#: nova/exception.py:648
msgid "Cannot disassociate auto assigined floating ip"
msgstr ""
-#: nova/exception.py:631
+#: nova/exception.py:652
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr ""
-#: nova/exception.py:635
+#: nova/exception.py:656
#, python-format
msgid "Certificate %(certificate_id)s not found."
msgstr ""
-#: nova/exception.py:639
+#: nova/exception.py:660
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr ""
-#: nova/exception.py:643
+#: nova/exception.py:664
#, python-format
msgid "Host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:647
+#: nova/exception.py:668
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:651
+#: nova/exception.py:672
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr ""
-#: nova/exception.py:655
+#: nova/exception.py:676
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr ""
-#: nova/exception.py:659
+#: nova/exception.py:680
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
msgstr ""
-#: nova/exception.py:664
+#: nova/exception.py:685
msgid "Quota could not be found"
msgstr ""
-#: nova/exception.py:668
+#: nova/exception.py:689
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr ""
-#: nova/exception.py:672
+#: nova/exception.py:693
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:676
+#: nova/exception.py:697
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr ""
-#: nova/exception.py:680
+#: nova/exception.py:701
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:684
+#: nova/exception.py:705
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr ""
-#: nova/exception.py:688
+#: nova/exception.py:709
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr ""
-#: nova/exception.py:692
+#: nova/exception.py:713
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr ""
-#: nova/exception.py:696
+#: nova/exception.py:717
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
-#: nova/exception.py:701
+#: nova/exception.py:722
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr ""
-#: nova/exception.py:705
+#: nova/exception.py:726
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:710
+#: nova/exception.py:731
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
msgstr ""
-#: nova/exception.py:715
+#: nova/exception.py:736
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr ""
-#: nova/exception.py:719
+#: nova/exception.py:740
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
-#: nova/exception.py:724
+#: nova/exception.py:745
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr ""
-#: nova/exception.py:728
+#: nova/exception.py:749
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
msgstr ""
-#: nova/exception.py:734
+#: nova/exception.py:755
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr ""
-#: nova/exception.py:738
+#: nova/exception.py:759
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:742
+#: nova/exception.py:763
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
msgstr ""
-#: nova/exception.py:747
+#: nova/exception.py:768
#, python-format
msgid "Invalid console type %(console_type)s"
msgstr ""
-#: nova/exception.py:751
+#: nova/exception.py:772
#, python-format
msgid "Instance type %(instance_type_id)s could not be found."
msgstr ""
-#: nova/exception.py:755
+#: nova/exception.py:776
#, python-format
msgid "Instance type with name %(instance_type_name)s could not be found."
msgstr ""
-#: nova/exception.py:760
+#: nova/exception.py:781
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr ""
-#: nova/exception.py:764
+#: nova/exception.py:785
#, python-format
msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr ""
-#: nova/exception.py:769
+#: nova/exception.py:790
#, python-format
-msgid "Cell %(cell_id)s could not be found."
+msgid "Cell %(cell_name)s doesn't exist."
msgstr ""
-#: nova/exception.py:773
+#: nova/exception.py:794
#, python-format
msgid "Inconsistency in cell routing: %(reason)s"
msgstr ""
-#: nova/exception.py:777
+#: nova/exception.py:798
#, python-format
msgid "Service API method not found: %(detail)s"
msgstr ""
-#: nova/exception.py:781
+#: nova/exception.py:802
msgid "Timeout waiting for response from cell"
msgstr ""
-#: nova/exception.py:785
+#: nova/exception.py:806
#, python-format
msgid "Cell message has reached maximum hop count: %(hop_count)s"
msgstr ""
-#: nova/exception.py:789
+#: nova/exception.py:810
msgid "No cells available matching scheduling criteria."
msgstr ""
-#: nova/exception.py:793
+#: nova/exception.py:814
#, python-format
msgid "Exception received during cell processing: %(exc_name)s."
msgstr ""
-#: nova/exception.py:797
+#: nova/exception.py:818
#, python-format
msgid "Cell is not known for instance %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:801
+#: nova/exception.py:822
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
-#: nova/exception.py:805
+#: nova/exception.py:826
#, python-format
msgid "Scheduler cost function %(cost_fn_str)s could not be found."
msgstr ""
-#: nova/exception.py:810
+#: nova/exception.py:831
#, python-format
msgid "Scheduler weight flag not found: %(flag_name)s"
msgstr ""
-#: nova/exception.py:814
+#: nova/exception.py:835
#, python-format
msgid "Instance %(instance_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:819
+#: nova/exception.py:840
#, python-format
msgid ""
"Instance %(instance_uuid)s has no system metadata with key "
"%(metadata_key)s."
msgstr ""
-#: nova/exception.py:824
+#: nova/exception.py:845
#, python-format
msgid ""
"Instance Type %(instance_type_id)s has no extra specs with key "
"%(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:829
+#: nova/exception.py:850
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:833
+#: nova/exception.py:854
msgid "Zero files could be found."
msgstr ""
-#: nova/exception.py:837
+#: nova/exception.py:858
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:842
+#: nova/exception.py:863
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:846
+#: nova/exception.py:867
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:850
+#: nova/exception.py:871
msgid "Action not allowed."
msgstr ""
-#: nova/exception.py:854
+#: nova/exception.py:875
msgid "Rotation is not allowed for snapshots"
msgstr ""
-#: nova/exception.py:858
+#: nova/exception.py:879
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:862
+#: nova/exception.py:883
#, python-format
msgid "Key pair %(key_name)s already exists."
msgstr ""
-#: nova/exception.py:866
+#: nova/exception.py:887
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:870
+#: nova/exception.py:891
#, python-format
msgid "Instance Type with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:874
+#: nova/exception.py:895
#, python-format
msgid "Instance Type with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:878
+#: nova/exception.py:899
#, python-format
msgid ""
"Flavor access alreay exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:883
+#: nova/exception.py:904
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:887
+#: nova/exception.py:908
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:891
+#: nova/exception.py:912
msgid "Migration error"
msgstr ""
-#: nova/exception.py:895
+#: nova/exception.py:916
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:901
+#: nova/exception.py:922
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:905
+#: nova/exception.py:926
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:909
+#: nova/exception.py:930
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:913
+#: nova/exception.py:934
#, python-format
msgid "Resize error: %(reason)s"
msgstr ""
-#: nova/exception.py:917
+#: nova/exception.py:938
msgid "Image is larger than instance type allows"
msgstr ""
-#: nova/exception.py:921
+#: nova/exception.py:942
msgid "Instance type's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:925
+#: nova/exception.py:946
msgid "Instance type's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:929
+#: nova/exception.py:950
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:933
+#: nova/exception.py:954
msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
msgstr ""
-#: nova/exception.py:937
+#: nova/exception.py:958
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:941
+#: nova/exception.py:962
msgid "Quota exceeded"
msgstr ""
-#: nova/exception.py:948
+#: nova/exception.py:969
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
msgstr ""
-#: nova/exception.py:953
+#: nova/exception.py:974
msgid "Maximum number of floating ips exceeded"
msgstr ""
-#: nova/exception.py:957
+#: nova/exception.py:978
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:961
+#: nova/exception.py:982
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:965
+#: nova/exception.py:986
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:969
+#: nova/exception.py:990
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:973
+#: nova/exception.py:994
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:977
+#: nova/exception.py:998
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:981
+#: nova/exception.py:1002
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:986
+#: nova/exception.py:1007
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:990
+#: nova/exception.py:1011
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:994
+#: nova/exception.py:1015
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:998
+#: nova/exception.py:1019
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1003
+#: nova/exception.py:1024
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:1007
+#: nova/exception.py:1028
msgid "Unable to create instance type"
msgstr ""
-#: nova/exception.py:1011
+#: nova/exception.py:1032
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:1017
+#: nova/exception.py:1038
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:1021
+#: nova/exception.py:1042
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:1025
+#: nova/exception.py:1046
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1029
+#: nova/exception.py:1050
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr ""
-#: nova/exception.py:1033
+#: nova/exception.py:1054
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1037
+#: nova/exception.py:1058
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1041
+#: nova/exception.py:1062
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1045
+#: nova/exception.py:1066
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1049
+#: nova/exception.py:1070
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1054
+#: nova/exception.py:1075
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1059
+#: nova/exception.py:1080
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1065
+#: nova/exception.py:1086
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1069
+#: nova/exception.py:1090
#, python-format
msgid ""
"unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1074
+#: nova/exception.py:1095
+#, python-format
+msgid ""
+"Action for request_id %(request_id)s on instance %(instance_uuid)s not "
+"found"
+msgstr ""
+
+#: nova/exception.py:1100
+#, python-format
+msgid "Event %(event)s not found for action id %(action_id)s"
+msgstr ""
+
+#: nova/exception.py:1104
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1078
+#: nova/exception.py:1108
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
+#: nova/exception.py:1112
+msgid "Instance recreate is not implemented by this virt driver."
+msgstr ""
+
#: nova/hooks.py:62
#, python-format
msgid "Running %(name)s pre-hook: %(obj)s"
@@ -1135,22 +1202,27 @@ msgstr ""
msgid "Running %(name)s post-hook: %(obj)s"
msgstr ""
+#: nova/manager.py:161
+#, python-format
+msgid "Skipping periodic task %(task)s because its interval is negative"
+msgstr ""
+
#: nova/manager.py:166
#, python-format
-msgid "Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run"
+msgid "Skipping periodic task %(task)s because it is disabled"
msgstr ""
-#: nova/manager.py:172
+#: nova/manager.py:225
#, python-format
msgid "Running periodic task %(full_task_name)s"
msgstr ""
-#: nova/manager.py:182
+#: nova/manager.py:233
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr ""
-#: nova/manager.py:257
+#: nova/manager.py:306
msgid "Notifying Schedulers of capabilities ..."
msgstr ""
@@ -1170,210 +1242,260 @@ msgstr ""
msgid "Rule checked when requested rule is not found"
msgstr ""
-#: nova/quota.py:719
+#: nova/quota.py:944
#, python-format
msgid "Created reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:738
+#: nova/quota.py:966
#, python-format
msgid "Failed to commit reservations %(reservations)s"
msgstr ""
-#: nova/quota.py:756
+#: nova/quota.py:968
+#, python-format
+msgid "Committed reservations %(reservations)s"
+msgstr ""
+
+#: nova/quota.py:988
#, python-format
msgid "Failed to roll back reservations %(reservations)s"
msgstr ""
-#: nova/service.py:173
+#: nova/quota.py:990
+#, python-format
+msgid "Rolled back reservations %(reservations)s"
+msgstr ""
+
+#: nova/service.py:196
msgid "Full set of CONF:"
msgstr ""
-#: nova/service.py:180
+#: nova/service.py:203
#, python-format
msgid "%(flag)s : FLAG SET "
msgstr ""
-#: nova/service.py:190 nova/service.py:288
+#: nova/service.py:213 nova/service.py:311
#, python-format
msgid "Caught %s, exiting"
msgstr ""
-#: nova/service.py:234
+#: nova/service.py:257
msgid "Parent process has died unexpectedly, exiting"
msgstr ""
-#: nova/service.py:270
+#: nova/service.py:293
msgid "Forking too fast, sleeping"
msgstr ""
-#: nova/service.py:293
+#: nova/service.py:316
msgid "Unhandled exception"
msgstr ""
-#: nova/service.py:300
+#: nova/service.py:323
#, python-format
msgid "Started child %d"
msgstr ""
-#: nova/service.py:310
+#: nova/service.py:333
#, python-format
msgid "Starting %d workers"
msgstr ""
-#: nova/service.py:324
+#: nova/service.py:347
#, python-format
msgid "Child %(pid)d killed by signal %(sig)d"
msgstr ""
-#: nova/service.py:327
+#: nova/service.py:350
#, python-format
msgid "Child %(pid)d exited with status %(code)d"
msgstr ""
-#: nova/service.py:330
+#: nova/service.py:353
#, python-format
msgid "pid %d not in child list"
msgstr ""
-#: nova/service.py:350
+#: nova/service.py:373
#, python-format
msgid "Caught %s, stopping children"
msgstr ""
-#: nova/service.py:361
+#: nova/service.py:384
#, python-format
msgid "Waiting on %d children to exit"
msgstr ""
-#: nova/service.py:392
+#: nova/service.py:426
#, python-format
msgid "Starting %(topic)s node (version %(version)s)"
msgstr ""
-#: nova/service.py:409 nova/openstack/common/rpc/service.py:47
+#: nova/service.py:443 nova/openstack/common/rpc/service.py:47
#, python-format
msgid "Creating Consumer connection for Service %s"
msgstr ""
-#: nova/service.py:429
+#: nova/service.py:463
#, python-format
msgid "Join ServiceGroup membership for this service %s"
msgstr ""
-#: nova/service.py:506
+#: nova/service.py:545
msgid "Service killed that has no database entry"
msgstr ""
-#: nova/service.py:626
+#: nova/service.py:580
+#, python-format
+msgid "Temporary directory is invalid: %s"
+msgstr ""
+
+#: nova/service.py:678
msgid "serve() can only be called once"
msgstr ""
-#: nova/utils.py:194
+#: nova/utils.py:196
#, python-format
msgid "Got unknown keyword args to utils.execute: %r"
msgstr ""
-#: nova/utils.py:205
+#: nova/utils.py:207
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:229 nova/utils.py:307 nova/virt/powervm/common.py:82
+#: nova/utils.py:231 nova/utils.py:309 nova/virt/powervm/common.py:83
#, python-format
msgid "Result was %s"
msgstr ""
-#: nova/utils.py:242
+#: nova/utils.py:244
#, python-format
msgid "%r failed. Retrying."
msgstr ""
-#: nova/utils.py:282
+#: nova/utils.py:284
#, python-format
msgid "Running cmd (SSH): %s"
msgstr ""
-#: nova/utils.py:284
+#: nova/utils.py:286
msgid "Environment not supported over SSH"
msgstr ""
-#: nova/utils.py:288
+#: nova/utils.py:290
msgid "process_input not supported over SSH"
msgstr ""
-#: nova/utils.py:323
+#: nova/utils.py:325
#, python-format
msgid "debug in callback: %s"
msgstr ""
-#: nova/utils.py:485
+#: nova/utils.py:487
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:488
+#: nova/utils.py:490
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:523
+#: nova/utils.py:529
#, python-format
msgid "Invalid backend: %s"
msgstr ""
-#: nova/utils.py:584
-msgid "in looping call"
+#: nova/utils.py:601
+msgid "in fixed duration looping call"
msgstr ""
-#: nova/utils.py:644
+#: nova/utils.py:636
+#, python-format
+msgid "Periodic task processor sleeping for %.02f seconds"
+msgstr ""
+
+#: nova/utils.py:643
+msgid "in dynamic looping call"
+msgstr ""
+
+#: nova/utils.py:697
#, python-format
msgid "Unknown byte multiplier: %s"
msgstr ""
-#: nova/utils.py:773
+#: nova/utils.py:826
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:802
+#: nova/utils.py:855
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:926
+#: nova/utils.py:978
#, python-format
msgid "timefunc: '%(name)s' took %(total_time).2f secs"
msgstr ""
-#: nova/utils.py:1003
+#: nova/utils.py:1044
#, python-format
msgid "Reloading cached file %s"
msgstr ""
-#: nova/utils.py:1113 nova/virt/configdrive.py:177
+#: nova/utils.py:1176 nova/virt/configdrive.py:176
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
-#: nova/wsgi.py:87
+#: nova/wsgi.py:121
#, python-format
msgid "%(name)s listening on %(host)s:%(port)s"
msgstr ""
-#: nova/wsgi.py:111
+#: nova/wsgi.py:136
+#, python-format
+msgid "Unable to find cert_file : %s"
+msgstr ""
+
+#: nova/wsgi.py:140
+#, python-format
+msgid "Unable to find ca_file : %s"
+msgstr ""
+
+#: nova/wsgi.py:144
+#, python-format
+msgid "Unable to find key_file : %s"
+msgstr ""
+
+#: nova/wsgi.py:148
+msgid ""
+"When running server in SSL mode, you must specify both a cert_file and "
+"key_file option value in your configuration file"
+msgstr ""
+
+#: nova/wsgi.py:178
+#, python-format
+msgid "Failed to start %(name)s on %(host)s:%(port)s with SSL support"
+msgstr ""
+
+#: nova/wsgi.py:206
msgid "Stopping WSGI server."
msgstr ""
-#: nova/wsgi.py:129
+#: nova/wsgi.py:224
msgid "WSGI server has stopped."
msgstr ""
-#: nova/wsgi.py:198
+#: nova/wsgi.py:293
msgid "You must implement __call__"
msgstr ""
-#: nova/wsgi.py:384
+#: nova/wsgi.py:479
#, python-format
msgid "Loading app %(name)s from %(path)s"
msgstr ""
@@ -1386,7 +1508,7 @@ msgstr ""
msgid "Sourcing roles from deprecated X-Role HTTP header"
msgstr ""
-#: nova/api/sizelimit.py:50 nova/api/metadata/password.py:64
+#: nova/api/sizelimit.py:50 nova/api/metadata/password.py:62
msgid "Request is too large."
msgstr ""
@@ -1405,125 +1527,130 @@ msgstr ""
msgid "FaultWrapper: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:179
+#: nova/api/ec2/__init__.py:174
msgid "Too many failed authentications."
msgstr ""
-#: nova/api/ec2/__init__.py:189
+#: nova/api/ec2/__init__.py:184
#, python-format
msgid ""
"Access key %(access_key)s has had %(failures)d failed authentications and"
" will be locked out for %(lock_mins)d minutes."
msgstr ""
-#: nova/api/ec2/__init__.py:206
+#: nova/api/ec2/__init__.py:201
msgid "Signature not provided"
msgstr ""
-#: nova/api/ec2/__init__.py:210
+#: nova/api/ec2/__init__.py:205
msgid "Access key not provided"
msgstr ""
-#: nova/api/ec2/__init__.py:245 nova/api/ec2/__init__.py:260
+#: nova/api/ec2/__init__.py:240 nova/api/ec2/__init__.py:255
msgid "Failure communicating with keystone"
msgstr ""
-#: nova/api/ec2/__init__.py:259
+#: nova/api/ec2/__init__.py:254
#, python-format
msgid "Keystone failure: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:319
+#: nova/api/ec2/__init__.py:314
msgid "Timestamp failed validation."
msgstr ""
-#: nova/api/ec2/__init__.py:339
+#: nova/api/ec2/__init__.py:334
#, python-format
msgid "action: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:341
+#: nova/api/ec2/__init__.py:336
#, python-format
msgid "arg: %(key)s\t\tval: %(value)s"
msgstr ""
-#: nova/api/ec2/__init__.py:416
+#: nova/api/ec2/__init__.py:411
#, python-format
msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
msgstr ""
-#: nova/api/ec2/__init__.py:488
+#: nova/api/ec2/__init__.py:483
#, python-format
msgid "InstanceNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:494
+#: nova/api/ec2/__init__.py:489
#, python-format
msgid "VolumeNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:500
+#: nova/api/ec2/__init__.py:495
#, python-format
msgid "SnapshotNotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:506
+#: nova/api/ec2/__init__.py:501
#, python-format
msgid "NotFound raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:509
+#: nova/api/ec2/__init__.py:504
#, python-format
msgid "EC2APIError raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:517
+#: nova/api/ec2/__init__.py:512
#, python-format
msgid "KeyPairExists raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:521
+#: nova/api/ec2/__init__.py:517
+#, python-format
+msgid "InvalidKeypair raised: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:522
#, python-format
msgid "InvalidParameterValue raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:525
+#: nova/api/ec2/__init__.py:526
#, python-format
msgid "InvalidPortRange raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:529
+#: nova/api/ec2/__init__.py:530
#, python-format
msgid "NotAuthorized raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:533
+#: nova/api/ec2/__init__.py:534
#, python-format
msgid "InvalidRequest raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:537
+#: nova/api/ec2/__init__.py:538
#, python-format
msgid "QuotaError raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:541
+#: nova/api/ec2/__init__.py:542
#, python-format
msgid "Invalid id: bogus (expecting \"i-...\"): %s"
msgstr ""
-#: nova/api/ec2/__init__.py:550
+#: nova/api/ec2/__init__.py:551
#, python-format
msgid "Unexpected error raised: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:551
+#: nova/api/ec2/__init__.py:552
#, python-format
msgid "Environment: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:553 nova/api/metadata/handler.py:138
-#: nova/api/metadata/handler.py:185
+#: nova/api/ec2/__init__.py:554 nova/api/metadata/handler.py:133
+#: nova/api/metadata/handler.py:180
msgid "An unknown error has occurred. Please try your request again."
msgstr ""
@@ -1532,273 +1659,267 @@ msgstr ""
msgid "Unsupported API request: controller = %(controller)s, action = %(action)s"
msgstr ""
-#: nova/api/ec2/cloud.py:395
+#: nova/api/ec2/cloud.py:389
#, python-format
msgid "Create snapshot of volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:421
+#: nova/api/ec2/cloud.py:415
#, python-format
msgid "Could not find key pair(s): %s"
msgstr ""
-#: nova/api/ec2/cloud.py:437
+#: nova/api/ec2/cloud.py:432
#, python-format
msgid "Create key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:444 nova/api/ec2/cloud.py:469
+#: nova/api/ec2/cloud.py:439 nova/api/ec2/cloud.py:458
#: nova/api/openstack/compute/contrib/keypairs.py:93
msgid "Quota exceeded, too many key pairs."
msgstr ""
-#: nova/api/ec2/cloud.py:450 nova/api/ec2/cloud.py:475
-#: nova/api/openstack/compute/contrib/keypairs.py:101
-#, python-format
-msgid "Key pair '%s' already exists."
-msgstr ""
-
-#: nova/api/ec2/cloud.py:459
+#: nova/api/ec2/cloud.py:448
#, python-format
msgid "Import key %s"
msgstr ""
-#: nova/api/ec2/cloud.py:482
+#: nova/api/ec2/cloud.py:468
#, python-format
msgid "Delete key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:616 nova/api/ec2/cloud.py:737
+#: nova/api/ec2/cloud.py:602 nova/api/ec2/cloud.py:723
msgid "Not enough parameters, need group_name or group_id"
msgstr ""
-#: nova/api/ec2/cloud.py:621
+#: nova/api/ec2/cloud.py:607
#, python-format
msgid "%s Not enough parameters to build a valid rule"
msgstr ""
-#: nova/api/ec2/cloud.py:659 nova/api/ec2/cloud.py:691
+#: nova/api/ec2/cloud.py:645 nova/api/ec2/cloud.py:677
msgid "No rule for the specified parameters."
msgstr ""
-#: nova/api/ec2/cloud.py:682
+#: nova/api/ec2/cloud.py:668
#, python-format
msgid "%s - This rule already exists in group"
msgstr ""
-#: nova/api/ec2/cloud.py:748
+#: nova/api/ec2/cloud.py:751
#, python-format
msgid "Get console output for instance %s"
msgstr ""
-#: nova/api/ec2/cloud.py:824
+#: nova/api/ec2/cloud.py:827
#, python-format
msgid "Create volume from snapshot %s"
msgstr ""
-#: nova/api/ec2/cloud.py:828 nova/api/openstack/compute/contrib/volumes.py:241
+#: nova/api/ec2/cloud.py:831 nova/api/openstack/compute/contrib/volumes.py:244
#, python-format
msgid "Create volume of %s GB"
msgstr ""
-#: nova/api/ec2/cloud.py:856
+#: nova/api/ec2/cloud.py:859
msgid "Delete Failed"
msgstr ""
-#: nova/api/ec2/cloud.py:869
+#: nova/api/ec2/cloud.py:872
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
-#: nova/api/ec2/cloud.py:877
+#: nova/api/ec2/cloud.py:880
msgid "Attach Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:890 nova/api/openstack/compute/contrib/volumes.py:428
+#: nova/api/ec2/cloud.py:893 nova/api/openstack/compute/contrib/volumes.py:436
#, python-format
msgid "Detach volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:896
+#: nova/api/ec2/cloud.py:899
msgid "Detach Volume Failed."
msgstr ""
-#: nova/api/ec2/cloud.py:922 nova/api/ec2/cloud.py:979
-#: nova/api/ec2/cloud.py:1528 nova/api/ec2/cloud.py:1543
+#: nova/api/ec2/cloud.py:925 nova/api/ec2/cloud.py:982
+#: nova/api/ec2/cloud.py:1535 nova/api/ec2/cloud.py:1550
#, python-format
msgid "attribute not supported: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1049
+#: nova/api/ec2/cloud.py:1056
#, python-format
msgid "vol = %s\n"
msgstr ""
-#: nova/api/ec2/cloud.py:1208
+#: nova/api/ec2/cloud.py:1215
msgid "Allocate address"
msgstr ""
-#: nova/api/ec2/cloud.py:1212
+#: nova/api/ec2/cloud.py:1219
msgid "No more floating IPs available"
msgstr ""
-#: nova/api/ec2/cloud.py:1216
+#: nova/api/ec2/cloud.py:1223
#, python-format
msgid "Release address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1221
+#: nova/api/ec2/cloud.py:1228
msgid "Unable to release IP Address."
msgstr ""
-#: nova/api/ec2/cloud.py:1224
+#: nova/api/ec2/cloud.py:1231
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1232
+#: nova/api/ec2/cloud.py:1239
msgid "Unable to associate IP Address, no fixed_ips."
msgstr ""
-#: nova/api/ec2/cloud.py:1240
-#: nova/api/openstack/compute/contrib/floating_ips.py:257
+#: nova/api/ec2/cloud.py:1247
+#: nova/api/openstack/compute/contrib/floating_ips.py:238
#, python-format
msgid "multiple fixed_ips exist, using the first: %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1249
+#: nova/api/ec2/cloud.py:1256
msgid "Floating ip is already associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1252
+#: nova/api/ec2/cloud.py:1259
msgid "l3driver call to add floating ip failed."
msgstr ""
-#: nova/api/ec2/cloud.py:1255
+#: nova/api/ec2/cloud.py:1262
msgid "Error, unable to associate floating ip."
msgstr ""
-#: nova/api/ec2/cloud.py:1263
+#: nova/api/ec2/cloud.py:1270
#, python-format
msgid "Disassociate address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1268
+#: nova/api/ec2/cloud.py:1275
msgid "Floating ip is not associated."
msgstr ""
-#: nova/api/ec2/cloud.py:1271
+#: nova/api/ec2/cloud.py:1278
#: nova/api/openstack/compute/contrib/floating_ips.py:100
msgid "Cannot disassociate auto assigned floating ip"
msgstr ""
-#: nova/api/ec2/cloud.py:1298
+#: nova/api/ec2/cloud.py:1305
msgid "Image must be available"
msgstr ""
-#: nova/api/ec2/cloud.py:1330
+#: nova/api/ec2/cloud.py:1337
msgid "Going to start terminating instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1340
+#: nova/api/ec2/cloud.py:1347
#, python-format
msgid "Reboot instance %r"
msgstr ""
-#: nova/api/ec2/cloud.py:1349
+#: nova/api/ec2/cloud.py:1356
msgid "Going to stop instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1358
+#: nova/api/ec2/cloud.py:1365
msgid "Going to start instances"
msgstr ""
-#: nova/api/ec2/cloud.py:1449
+#: nova/api/ec2/cloud.py:1456
#, python-format
msgid "De-registering image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1465
+#: nova/api/ec2/cloud.py:1472
msgid "imageLocation is required"
msgstr ""
-#: nova/api/ec2/cloud.py:1484
+#: nova/api/ec2/cloud.py:1491
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1546
+#: nova/api/ec2/cloud.py:1553
msgid "user or group not specified"
msgstr ""
-#: nova/api/ec2/cloud.py:1548
+#: nova/api/ec2/cloud.py:1555
msgid "only group \"all\" is supported"
msgstr ""
-#: nova/api/ec2/cloud.py:1550
+#: nova/api/ec2/cloud.py:1557
msgid "operation_type must be add or remove"
msgstr ""
-#: nova/api/ec2/cloud.py:1552
+#: nova/api/ec2/cloud.py:1559
#, python-format
msgid "Updating image %s publicity"
msgstr ""
-#: nova/api/ec2/cloud.py:1565
+#: nova/api/ec2/cloud.py:1572
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1594
+#: nova/api/ec2/cloud.py:1601
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
msgstr ""
-#: nova/api/ec2/cloud.py:1624
+#: nova/api/ec2/cloud.py:1631
#, python-format
msgid "Couldn't stop instance with in %d sec"
msgstr ""
-#: nova/api/ec2/cloud.py:1642
+#: nova/api/ec2/cloud.py:1649
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1675
+#: nova/api/ec2/cloud.py:1682
msgid "Invalid CIDR"
msgstr ""
-#: nova/api/ec2/ec2utils.py:188
+#: nova/api/ec2/ec2utils.py:190
msgid "Request must include either Timestamp or Expires, but cannot contain both"
msgstr ""
-#: nova/api/ec2/ec2utils.py:208
+#: nova/api/ec2/ec2utils.py:210
msgid "Timestamp is invalid."
msgstr ""
-#: nova/api/metadata/handler.py:110
+#: nova/api/metadata/handler.py:105
msgid ""
"X-Instance-ID present in request headers. The "
"'service_quantum_metadata_proxy' option must be enabled to process this "
"header."
msgstr ""
-#: nova/api/metadata/handler.py:136 nova/api/metadata/handler.py:143
+#: nova/api/metadata/handler.py:131 nova/api/metadata/handler.py:138
#, python-format
msgid "Failed to get metadata for ip: %s"
msgstr ""
-#: nova/api/metadata/handler.py:155
+#: nova/api/metadata/handler.py:150
msgid "X-Instance-ID header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:157
+#: nova/api/metadata/handler.py:152
msgid "Multiple X-Instance-ID headers found within request."
msgstr ""
-#: nova/api/metadata/handler.py:171
+#: nova/api/metadata/handler.py:166
#, python-format
msgid ""
"X-Instance-ID-Signature: %(signature)s does not match the expected value:"
@@ -1806,11 +1927,11 @@ msgid ""
"%(remote_address)s"
msgstr ""
-#: nova/api/metadata/handler.py:176
+#: nova/api/metadata/handler.py:171
msgid "Invalid proxy request signature."
msgstr ""
-#: nova/api/metadata/handler.py:183 nova/api/metadata/handler.py:190
+#: nova/api/metadata/handler.py:178 nova/api/metadata/handler.py:185
#, python-format
msgid "Failed to get metadata for instance id: %s"
msgstr ""
@@ -1820,7 +1941,7 @@ msgstr ""
msgid "Caught error: %s"
msgstr ""
-#: nova/api/openstack/__init__.py:60 nova/api/openstack/wsgi.py:986
+#: nova/api/openstack/__init__.py:60 nova/api/openstack/wsgi.py:992
#, python-format
msgid "%(url)s returned with HTTP %(status)d"
msgstr ""
@@ -1869,7 +1990,7 @@ msgstr ""
msgid "offset param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:230 nova/api/openstack/compute/servers.py:536
+#: nova/api/openstack/common.py:230 nova/api/openstack/compute/servers.py:540
#, python-format
msgid "marker [%s] not found"
msgstr ""
@@ -1973,77 +2094,96 @@ msgstr ""
msgid "Failed to load extension %(ext_name)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:199 nova/api/openstack/wsgi.py:619
+#: nova/api/openstack/wsgi.py:199 nova/api/openstack/wsgi.py:621
msgid "cannot understand JSON"
msgstr ""
#: nova/api/openstack/wsgi.py:223
-#: nova/api/openstack/compute/contrib/hosts.py:78
+#: nova/api/openstack/compute/contrib/cells.py:104
+#: nova/api/openstack/compute/contrib/hosts.py:77
msgid "cannot understand XML"
msgstr ""
-#: nova/api/openstack/wsgi.py:624
+#: nova/api/openstack/wsgi.py:626
msgid "too many body keys"
msgstr ""
-#: nova/api/openstack/wsgi.py:667
+#: nova/api/openstack/wsgi.py:669
#, python-format
msgid "Exception handling resource: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:671
+#: nova/api/openstack/wsgi.py:673
#, python-format
msgid "Fault thrown: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:674
+#: nova/api/openstack/wsgi.py:676
#, python-format
msgid "HTTP exception thrown: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:788
+#: nova/api/openstack/wsgi.py:790
msgid "Unrecognized Content-Type provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:792
+#: nova/api/openstack/wsgi.py:794
msgid "No Content-Type provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:796
+#: nova/api/openstack/wsgi.py:798
msgid "Empty body provided in request"
msgstr ""
-#: nova/api/openstack/wsgi.py:897
+#: nova/api/openstack/wsgi.py:899
msgid "Invalid XML in request body"
msgstr ""
-#: nova/api/openstack/wsgi.py:916
+#: nova/api/openstack/wsgi.py:918
#, python-format
msgid "There is no such action: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:919 nova/api/openstack/wsgi.py:932
+#: nova/api/openstack/wsgi.py:921 nova/api/openstack/wsgi.py:938
#: nova/api/openstack/compute/server_metadata.py:58
#: nova/api/openstack/compute/server_metadata.py:76
#: nova/api/openstack/compute/server_metadata.py:101
#: nova/api/openstack/compute/server_metadata.py:126
#: nova/api/openstack/compute/contrib/admin_actions.py:211
+#: nova/api/openstack/compute/contrib/evacuate.py:49
msgid "Malformed request body"
msgstr ""
-#: nova/api/openstack/wsgi.py:929
+#: nova/api/openstack/wsgi.py:925
+#, python-format
+msgid "Action: '%(action)s', body: %(body)s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:926
+#, python-format
+msgid "Calling method %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:935
msgid "Unsupported Content-Type"
msgstr ""
-#: nova/api/openstack/wsgi.py:941
+#: nova/api/openstack/wsgi.py:947
msgid "Malformed request url"
msgstr ""
-#: nova/api/openstack/wsgi.py:989
+#: nova/api/openstack/wsgi.py:995
#, python-format
msgid "%(url)s returned a fault: %(e)s"
msgstr ""
+#: nova/api/openstack/wsgi.py:1188
+#, python-format
+msgid ""
+"API request failed, fault raised to the top of the stack. Detailed "
+"stacktrace %s"
+msgstr ""
+
#: nova/api/openstack/xmlutil.py:265
msgid "element is not a child"
msgstr ""
@@ -2092,13 +2232,13 @@ msgstr ""
#: nova/api/openstack/compute/image_metadata.py:80
#: nova/api/openstack/compute/server_metadata.py:80
-#: nova/api/openstack/compute/contrib/flavorextraspecs.py:79
+#: nova/api/openstack/compute/contrib/flavorextraspecs.py:88
msgid "Request body and URI mismatch"
msgstr ""
#: nova/api/openstack/compute/image_metadata.py:83
#: nova/api/openstack/compute/server_metadata.py:84
-#: nova/api/openstack/compute/contrib/flavorextraspecs.py:82
+#: nova/api/openstack/compute/contrib/flavorextraspecs.py:91
msgid "Request body contains too many items"
msgstr ""
@@ -2127,240 +2267,257 @@ msgstr ""
#: nova/api/openstack/compute/server_metadata.py:38
#: nova/api/openstack/compute/server_metadata.py:122
-#: nova/api/openstack/compute/server_metadata.py:166
+#: nova/api/openstack/compute/server_metadata.py:171
msgid "Server does not exist"
msgstr ""
-#: nova/api/openstack/compute/server_metadata.py:148
-#: nova/api/openstack/compute/server_metadata.py:159
+#: nova/api/openstack/compute/server_metadata.py:152
+#: nova/api/openstack/compute/server_metadata.py:163
msgid "Metadata item was not found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:501
+#: nova/api/openstack/compute/servers.py:505
+#: nova/api/openstack/compute/contrib/cells.py:278
msgid "Invalid changes-since value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:520
+#: nova/api/openstack/compute/servers.py:524
msgid "Only administrators may list deleted instances"
msgstr ""
-#: nova/api/openstack/compute/servers.py:539
-msgid "Flavor could not be found"
+#: nova/api/openstack/compute/servers.py:543
+#, python-format
+msgid "Flavor '%s' could not be found "
msgstr ""
-#: nova/api/openstack/compute/servers.py:555
-#: nova/api/openstack/compute/servers.py:723
-#: nova/api/openstack/compute/servers.py:987
-#: nova/api/openstack/compute/servers.py:1090
-#: nova/api/openstack/compute/servers.py:1258
+#: nova/api/openstack/compute/servers.py:560
+#: nova/api/openstack/compute/servers.py:738
+#: nova/api/openstack/compute/servers.py:1010
+#: nova/api/openstack/compute/servers.py:1116
+#: nova/api/openstack/compute/servers.py:1289
msgid "Instance could not be found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:562
-msgid "Server name is not a string or unicode"
+#: nova/api/openstack/compute/servers.py:567
+#, python-format
+msgid "%s is not a string or unicode"
msgstr ""
-#: nova/api/openstack/compute/servers.py:566
-msgid "Server name is an empty string"
+#: nova/api/openstack/compute/servers.py:571
+#, python-format
+msgid "%s is an empty string"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:575
+#, python-format
+msgid "%(name)s can be at most %(max_length)s characters."
msgstr ""
-#: nova/api/openstack/compute/servers.py:570
-msgid "Server name must be less than 256 characters."
+#: nova/api/openstack/compute/servers.py:586
+msgid "Device name cannot include spaces."
msgstr ""
-#: nova/api/openstack/compute/servers.py:587
+#: nova/api/openstack/compute/servers.py:603
#, python-format
msgid "Bad personality format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:590
+#: nova/api/openstack/compute/servers.py:606
msgid "Bad personality format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:594
+#: nova/api/openstack/compute/servers.py:609
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:625
+#: nova/api/openstack/compute/servers.py:640
msgid "Unknown argment : port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:628
+#: nova/api/openstack/compute/servers.py:643
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:638
+#: nova/api/openstack/compute/servers.py:653
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:648
+#: nova/api/openstack/compute/servers.py:663
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:661
+#: nova/api/openstack/compute/servers.py:676
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr ""
-#: nova/api/openstack/compute/servers.py:667
+#: nova/api/openstack/compute/servers.py:682
#, python-format
msgid "Bad network format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:670
+#: nova/api/openstack/compute/servers.py:685
msgid "Bad networks format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:696
+#: nova/api/openstack/compute/servers.py:711
msgid "Userdata content cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:703
+#: nova/api/openstack/compute/servers.py:718
msgid "accessIPv4 is not proper IPv4 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:710
+#: nova/api/openstack/compute/servers.py:725
msgid "accessIPv6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:739
+#: nova/api/openstack/compute/servers.py:754
msgid "Server name is not defined"
msgstr ""
-#: nova/api/openstack/compute/servers.py:787
-#: nova/api/openstack/compute/servers.py:893
+#: nova/api/openstack/compute/servers.py:802
+#: nova/api/openstack/compute/servers.py:916
msgid "Invalid flavorRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:827
+#: nova/api/openstack/compute/servers.py:843
msgid "min_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:830
+#: nova/api/openstack/compute/servers.py:846
msgid "min_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:835
+#: nova/api/openstack/compute/servers.py:852
msgid "max_count must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:838
+#: nova/api/openstack/compute/servers.py:855
msgid "max_count must be > 0"
msgstr ""
-#: nova/api/openstack/compute/servers.py:841
+#: nova/api/openstack/compute/servers.py:859
msgid "min_count must be <= max_count"
msgstr ""
-#: nova/api/openstack/compute/servers.py:890
+#: nova/api/openstack/compute/servers.py:911
msgid "Can not find requested image"
msgstr ""
-#: nova/api/openstack/compute/servers.py:896
+#: nova/api/openstack/compute/servers.py:919
msgid "Invalid key_name provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:975
+#: nova/api/openstack/compute/servers.py:998
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:979
+#: nova/api/openstack/compute/servers.py:1002
msgid "Personality cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1005
-#: nova/api/openstack/compute/servers.py:1025
+#: nova/api/openstack/compute/servers.py:1028
+#: nova/api/openstack/compute/servers.py:1048
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1011
+#: nova/api/openstack/compute/servers.py:1034
#, python-format
msgid "Error in confirm-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1031
+#: nova/api/openstack/compute/servers.py:1051
+msgid "Flavor used by the instance could not be found."
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:1057
#, python-format
msgid "Error in revert-resize %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1044
+#: nova/api/openstack/compute/servers.py:1070
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1048
+#: nova/api/openstack/compute/servers.py:1074
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1061
+#: nova/api/openstack/compute/servers.py:1087
#, python-format
msgid "Error in reboot %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1073
+#: nova/api/openstack/compute/servers.py:1099
msgid "Unable to locate requested flavor."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1076
+#: nova/api/openstack/compute/servers.py:1102
msgid "Resize requires a flavor change."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1100
+#: nova/api/openstack/compute/servers.py:1126
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1109
+#: nova/api/openstack/compute/servers.py:1135
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1136
+#: nova/api/openstack/compute/servers.py:1162
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1149
+#: nova/api/openstack/compute/servers.py:1175
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1153
-#: nova/api/openstack/compute/servers.py:1355
+#: nova/api/openstack/compute/servers.py:1179
+#: nova/api/openstack/compute/servers.py:1386
msgid "Invalid adminPass"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1164
+#: nova/api/openstack/compute/servers.py:1185
+msgid "Unable to set password on instance"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:1194
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1177
+#: nova/api/openstack/compute/servers.py:1207
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1180
+#: nova/api/openstack/compute/servers.py:1210
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1198
+#: nova/api/openstack/compute/servers.py:1228
#: nova/api/openstack/compute/contrib/aggregates.py:143
-#: nova/api/openstack/compute/contrib/coverage_ext.py:246
+#: nova/api/openstack/compute/contrib/coverage_ext.py:256
#: nova/api/openstack/compute/contrib/keypairs.py:78
msgid "Invalid request body"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1203
+#: nova/api/openstack/compute/servers.py:1234
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1265
+#: nova/api/openstack/compute/servers.py:1296
msgid "Cannot find image for rebuild"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1298
+#: nova/api/openstack/compute/servers.py:1329
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1382
+#: nova/api/openstack/compute/servers.py:1413
#, python-format
msgid "Removing options '%(unk_opt_str)s' from query"
msgstr ""
@@ -2398,7 +2555,7 @@ msgstr ""
#: nova/api/openstack/compute/contrib/admin_actions.py:154
#: nova/api/openstack/compute/contrib/admin_actions.py:170
#: nova/api/openstack/compute/contrib/admin_actions.py:186
-#: nova/api/openstack/compute/contrib/admin_actions.py:314
+#: nova/api/openstack/compute/contrib/admin_actions.py:312
#: nova/api/openstack/compute/contrib/multinic.py:41
#: nova/api/openstack/compute/contrib/rescue.py:44
msgid "Server not found"
@@ -2457,7 +2614,7 @@ msgstr ""
msgid "Desired state must be specified. Valid states are: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:317
+#: nova/api/openstack/compute/contrib/admin_actions.py:315
#, python-format
msgid "Compute.api::resetState %s"
msgstr ""
@@ -2499,11 +2656,40 @@ msgstr ""
msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
msgstr ""
+#: nova/api/openstack/compute/contrib/baremetal_nodes.py:184
+msgid "Must specify id or address"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/cells.py:199
+msgid "Cell name cannot be empty"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/cells.py:203
+msgid "Cell name cannot contain '!' or '.'"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/cells.py:210
+msgid "Cell type must be 'parent' or 'child'"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/cells.py:230
+#: nova/api/openstack/compute/contrib/cells.py:250
+msgid "No cell information in request"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/cells.py:235
+msgid "No cell name in request"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/cells.py:272
+msgid "Only 'updated_since' and 'project_id' are understood."
+msgstr ""
+
#: nova/api/openstack/compute/contrib/certificates.py:73
msgid "Only root certificate can be retrieved."
msgstr ""
-#: nova/api/openstack/compute/contrib/cloudpipe.py:151
+#: nova/api/openstack/compute/contrib/cloudpipe.py:150
msgid ""
"Unable to claim IP for VPN instances, ensure it isn't running, and try "
"again in a few minutes"
@@ -2526,37 +2712,46 @@ msgstr ""
msgid "Unable to get console"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:101
+#: nova/api/openstack/compute/contrib/coverage_ext.py:105
#, python-format
msgid "Can't connect to service: %s, no portspecified\n"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:104
+#: nova/api/openstack/compute/contrib/coverage_ext.py:108
#, python-format
msgid "No backdoor API command for service: %s\n"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:123
+#: nova/api/openstack/compute/contrib/coverage_ext.py:127
msgid "Coverage begin"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:157
+#: nova/api/openstack/compute/contrib/coverage_ext.py:161
msgid "Coverage not running"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:186
+#: nova/api/openstack/compute/contrib/coverage_ext.py:190
msgid "Invalid path"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:190
+#: nova/api/openstack/compute/contrib/coverage_ext.py:194
msgid "No path given for report file"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:197
+#: nova/api/openstack/compute/contrib/coverage_ext.py:201
msgid "You can't use html reports without combining"
msgstr ""
-#: nova/api/openstack/compute/contrib/coverage_ext.py:244
+#: nova/api/openstack/compute/contrib/coverage_ext.py:211
+#, python-format
+msgid "Directory conflict: %s already exists"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:246
+msgid "Python coverage module is not installed."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/coverage_ext.py:254
#, python-format
msgid "Coverage doesn't have %s action"
msgstr ""
@@ -2566,6 +2761,23 @@ msgstr ""
msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr ""
+#: nova/api/openstack/compute/contrib/evacuate.py:43
+msgid "Instance evacuate is admin only functionality"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/evacuate.py:60
+msgid "admin password can't be changed on existing disk"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/evacuate.py:68
+msgid "host and onSharedStorage must be specified."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/evacuate.py:79
+#, python-format
+msgid "Error in evacuate, %s"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/fixed_ips.py:42
#, python-format
msgid "Fixed IP %s has been deleted"
@@ -2576,75 +2788,79 @@ msgstr ""
msgid "Fixed IP %s not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/flavor_access.py:80
-#: nova/api/openstack/compute/contrib/flavor_access.py:104
+#: nova/api/openstack/compute/contrib/flavor_access.py:76
+#: nova/api/openstack/compute/contrib/flavor_access.py:100
msgid "Flavor not found."
msgstr ""
-#: nova/api/openstack/compute/contrib/flavor_access.py:109
+#: nova/api/openstack/compute/contrib/flavor_access.py:105
msgid "Access list not available for public flavors."
msgstr ""
-#: nova/api/openstack/compute/contrib/flavor_access.py:121
+#: nova/api/openstack/compute/contrib/flavor_access.py:117
msgid "No request body"
msgstr ""
-#: nova/api/openstack/compute/contrib/flavorextraspecs.py:49
+#: nova/api/openstack/compute/contrib/flavorextraspecs.py:58
msgid "No Request Body"
msgstr ""
+#: nova/api/openstack/compute/contrib/floating_ip_dns.py:232
+msgid "DNS entries not found."
+msgstr ""
+
#: nova/api/openstack/compute/contrib/floating_ips.py:97
msgid "Floating ip is not associated"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:151
-#: nova/api/openstack/compute/contrib/floating_ips.py:199
+#: nova/api/openstack/compute/contrib/floating_ips.py:132
+#: nova/api/openstack/compute/contrib/floating_ips.py:180
#, python-format
msgid "Floating ip not found for id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:184
+#: nova/api/openstack/compute/contrib/floating_ips.py:165
#, python-format
msgid "No more floating ips in pool %s."
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:186
+#: nova/api/openstack/compute/contrib/floating_ips.py:167
msgid "No more floating ips available."
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:234
-#: nova/api/openstack/compute/contrib/floating_ips.py:290
+#: nova/api/openstack/compute/contrib/floating_ips.py:215
+#: nova/api/openstack/compute/contrib/floating_ips.py:271
#: nova/api/openstack/compute/contrib/security_groups.py:414
msgid "Missing parameter dict"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:237
-#: nova/api/openstack/compute/contrib/floating_ips.py:293
+#: nova/api/openstack/compute/contrib/floating_ips.py:218
+#: nova/api/openstack/compute/contrib/floating_ips.py:274
msgid "Address not specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:244
+#: nova/api/openstack/compute/contrib/floating_ips.py:225
msgid "No nw_info cache associated with instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:249
+#: nova/api/openstack/compute/contrib/floating_ips.py:230
msgid "No fixed ips associated to instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:265
+#: nova/api/openstack/compute/contrib/floating_ips.py:246
msgid "floating ip is already associated"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:268
+#: nova/api/openstack/compute/contrib/floating_ips.py:249
msgid "l3driver call to add floating ip failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:272
-#: nova/api/openstack/compute/contrib/floating_ips.py:301
+#: nova/api/openstack/compute/contrib/floating_ips.py:253
+#: nova/api/openstack/compute/contrib/floating_ips.py:282
msgid "floating ip not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:275
+#: nova/api/openstack/compute/contrib/floating_ips.py:256
msgid "Error. Unable to associate floating ip"
msgstr ""
@@ -2657,77 +2873,82 @@ msgstr ""
msgid "fping utility is not found."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:122
+#: nova/api/openstack/compute/contrib/hosts.py:173
#, python-format
-msgid "Host '%s' could not be found."
+msgid "Invalid update setting: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:151
+#: nova/api/openstack/compute/contrib/hosts.py:176
#, python-format
msgid "Invalid status: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:155
+#: nova/api/openstack/compute/contrib/hosts.py:178
#, python-format
msgid "Invalid mode: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:159
-#, python-format
-msgid "Invalid update setting: '%s'"
+#: nova/api/openstack/compute/contrib/hosts.py:180
+msgid "'status' or 'maintenance_mode' needed for host update"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:177
+#: nova/api/openstack/compute/contrib/hosts.py:195
#, python-format
-msgid "Putting host %(host)s in maintenance mode %(mode)s."
+msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:182
+#: nova/api/openstack/compute/contrib/hosts.py:200
msgid "Virt driver does not implement host maintenance mode."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:190
+#: nova/api/openstack/compute/contrib/hosts.py:213
#, python-format
-msgid "Setting host %(host)s to %(state)s."
+msgid "Enabling host %s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:195
+#: nova/api/openstack/compute/contrib/hosts.py:215
+#, python-format
+msgid "Disabling host %s."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:220
msgid "Virt driver does not implement host disabled status."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:207
+#: nova/api/openstack/compute/contrib/hosts.py:236
msgid "Virt driver does not implement host power management."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:239
+#: nova/api/openstack/compute/contrib/hosts.py:322
msgid "Describe-resource is admin only functionality"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:247
-msgid "Host not found"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/hypervisors.py:184
-#: nova/api/openstack/compute/contrib/hypervisors.py:195
+#: nova/api/openstack/compute/contrib/hypervisors.py:185
+#: nova/api/openstack/compute/contrib/hypervisors.py:196
#, python-format
msgid "Hypervisor with ID '%s' could not be found."
msgstr ""
-#: nova/api/openstack/compute/contrib/hypervisors.py:203
+#: nova/api/openstack/compute/contrib/hypervisors.py:204
msgid "Virt driver does not implement uptime function."
msgstr ""
-#: nova/api/openstack/compute/contrib/hypervisors.py:218
-#: nova/api/openstack/compute/contrib/hypervisors.py:232
+#: nova/api/openstack/compute/contrib/hypervisors.py:220
+#: nova/api/openstack/compute/contrib/hypervisors.py:230
#, python-format
msgid "No hypervisor matching '%s' could be found."
msgstr ""
-#: nova/api/openstack/compute/contrib/instance_usage_audit_log.py:55
+#: nova/api/openstack/compute/contrib/instance_usage_audit_log.py:57
#, python-format
msgid "Invalid timestamp for date %s"
msgstr ""
+#: nova/api/openstack/compute/contrib/keypairs.py:101
+#, python-format
+msgid "Key pair '%s' already exists."
+msgstr ""
+
#: nova/api/openstack/compute/contrib/multinic.py:52
msgid "Missing 'networkId' argument for addFixedIp"
msgstr ""
@@ -2741,65 +2962,97 @@ msgstr ""
msgid "Unable to find address %r"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:72
+#: nova/api/openstack/compute/contrib/networks_associate.py:23
#, python-format
-msgid "Disassociating network with id %s"
+msgid "Disassociating host with network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:77
-#: nova/api/openstack/compute/contrib/networks.py:87
-#: nova/api/openstack/compute/contrib/networks.py:97
#: nova/api/openstack/compute/contrib/networks_associate.py:27
#: nova/api/openstack/compute/contrib/networks_associate.py:38
#: nova/api/openstack/compute/contrib/networks_associate.py:50
+#: nova/api/openstack/compute/contrib/os_networks.py:78
+#: nova/api/openstack/compute/contrib/os_networks.py:88
+#: nova/api/openstack/compute/contrib/os_networks.py:98
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:110
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:132
msgid "Network not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:83
+#: nova/api/openstack/compute/contrib/networks_associate.py:34
+#, python-format
+msgid "Disassociating project with network with id %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:73
+#, python-format
+msgid "Disassociating network with id %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:84
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:106
#, python-format
msgid "Showing network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:93
+#: nova/api/openstack/compute/contrib/os_networks.py:94
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:124
#, python-format
msgid "Deleting network with id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:108
+#: nova/api/openstack/compute/contrib/os_networks.py:109
msgid "Missing network in body"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:112
+#: nova/api/openstack/compute/contrib/os_networks.py:113
msgid "Network label is required"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:116
+#: nova/api/openstack/compute/contrib/os_networks.py:117
msgid "Network cidr or cidr_v6 is required"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:118
+#: nova/api/openstack/compute/contrib/os_networks.py:119
#, python-format
msgid "Creating network with label %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:134
+#: nova/api/openstack/compute/contrib/os_networks.py:135
#, python-format
msgid "Associating network %(network)s with project %(project)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks.py:142
+#: nova/api/openstack/compute/contrib/os_networks.py:143
#, python-format
msgid "Cannot associate network %(network)s with project %(project)s: %(message)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/networks_associate.py:23
-#, python-format
-msgid "Disassociating host with network with id %s"
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:121
+msgid "Failed to update usages deallocating network."
msgstr ""
-#: nova/api/openstack/compute/contrib/networks_associate.py:34
-#, python-format
-msgid "Disassociating project with network with id %s"
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:151
+msgid "No CIDR requested"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:157
+msgid "Requested network does not contain enough (2+) usable hosts"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:161
+msgid "CIDR is malformed."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:164
+msgid "Address could not be converted."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:172
+msgid "Quota exceeded, too many networks."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:183
+msgid "Create networks failed"
msgstr ""
#: nova/api/openstack/compute/contrib/quotas.py:63
@@ -2839,43 +3092,43 @@ msgstr ""
msgid "stop instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:73
+#: nova/api/openstack/compute/contrib/volumes.py:76
#, python-format
msgid "vol=%s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:185
+#: nova/api/openstack/compute/contrib/volumes.py:188
#, python-format
msgid "Delete volume with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:350
-#: nova/api/openstack/compute/contrib/volumes.py:438
+#: nova/api/openstack/compute/contrib/volumes.py:356
+#: nova/api/openstack/compute/contrib/volumes.py:446
#, python-format
msgid "Instance %s is not attached."
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:371
+#: nova/api/openstack/compute/contrib/volumes.py:377
#, python-format
msgid "Bad volumeId format: volumeId is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:389
+#: nova/api/openstack/compute/contrib/volumes.py:396
#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:552
+#: nova/api/openstack/compute/contrib/volumes.py:560
#, python-format
msgid "Delete snapshot with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:595
+#: nova/api/openstack/compute/contrib/volumes.py:603
#, python-format
msgid "Create snapshot from volume %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/volumes.py:599
+#: nova/api/openstack/compute/contrib/volumes.py:607
#, python-format
msgid "Invalid value '%s' for force. "
msgstr ""
@@ -2884,87 +3137,94 @@ msgstr ""
msgid "Instance has had its instance_type removed from the DB"
msgstr ""
-#: nova/cells/messaging.py:198
+#: nova/cells/messaging.py:201
#, python-format
msgid "Error processing message locally: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:352 nova/cells/messaging.py:358
+#: nova/cells/messaging.py:355 nova/cells/messaging.py:361
#, python-format
msgid "destination is %(target_cell)s but routing_path is %(routing_path)s"
msgstr ""
-#: nova/cells/messaging.py:368
+#: nova/cells/messaging.py:371
#, python-format
msgid "Unknown %(cell_type)s when routing to %(target_cell)s"
msgstr ""
-#: nova/cells/messaging.py:392
+#: nova/cells/messaging.py:395
#, python-format
msgid "Error locating next hop for message: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:419
+#: nova/cells/messaging.py:422
#, python-format
msgid "Failed to send message to cell: %(next_hop)s: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:498
+#: nova/cells/messaging.py:501
#, python-format
msgid "Error locating next hops for message: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:518
+#: nova/cells/messaging.py:521
#, python-format
msgid "Error sending message to next hops: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:536
+#: nova/cells/messaging.py:539
#, python-format
msgid "Error waiting for responses from neighbor cells: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:628
+#: nova/cells/messaging.py:647
#, python-format
msgid "Unknown method '%(method)s' in compute API"
msgstr ""
-#: nova/cells/messaging.py:651
+#: nova/cells/messaging.py:670
#, python-format
msgid "Received capabilities from child cell %(cell_name)s: %(capabilities)s"
msgstr ""
-#: nova/cells/messaging.py:660
+#: nova/cells/messaging.py:679
#, python-format
msgid "Received capacities from child cell %(cell_name)s: %(capacities)s"
msgstr ""
-#: nova/cells/messaging.py:719
+#: nova/cells/messaging.py:759
#, python-format
msgid "Got update for instance %(instance_uuid)s: %(instance)s"
msgstr ""
-#: nova/cells/messaging.py:742
+#: nova/cells/messaging.py:782
#, python-format
msgid "Got update to delete instance %(instance_uuid)s"
msgstr ""
-#: nova/cells/messaging.py:757
+#: nova/cells/messaging.py:797
#, python-format
msgid "Got broadcast to %(delete_type)s delete instance"
msgstr ""
-#: nova/cells/messaging.py:771
+#: nova/cells/messaging.py:811
#, python-format
msgid "Got message to create instance fault: %(instance_fault)s"
msgstr ""
-#: nova/cells/messaging.py:921
+#: nova/cells/messaging.py:832
+#, python-format
+msgid ""
+"Forcing a sync of instances, project_id=%(projid_str)s, "
+"updated_since=%(since_str)s"
+msgstr ""
+
+#: nova/cells/messaging.py:1009
#, python-format
msgid "Updating parents with our capabilities: %(capabs)s"
msgstr ""
-#: nova/cells/messaging.py:941
+#: nova/cells/messaging.py:1029
#, python-format
msgid "Updating parents with our capacities: %(capacities)s"
msgstr ""
@@ -2986,243 +3246,255 @@ msgstr ""
msgid "Error scheduling instances %(instance_uuids)s"
msgstr ""
-#: nova/cells/state.py:264
+#: nova/cells/state.py:263
msgid "Updating cell cache from db."
msgstr ""
-#: nova/cells/state.py:300
+#: nova/cells/state.py:308
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capabilities"
msgstr ""
-#: nova/cells/state.py:315
+#: nova/cells/state.py:323
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
msgstr ""
-#: nova/cloudpipe/pipelib.py:43
+#: nova/cloudpipe/pipelib.py:47
msgid "Instance type for vpn instances"
msgstr ""
-#: nova/cloudpipe/pipelib.py:46
+#: nova/cloudpipe/pipelib.py:50
msgid "Template for cloudpipe instance boot script"
msgstr ""
-#: nova/cloudpipe/pipelib.py:49
+#: nova/cloudpipe/pipelib.py:53
msgid "Network to push into openvpn config"
msgstr ""
-#: nova/cloudpipe/pipelib.py:52
+#: nova/cloudpipe/pipelib.py:56
msgid "Netmask to push into openvpn config"
msgstr ""
-#: nova/cloudpipe/pipelib.py:109
+#: nova/cloudpipe/pipelib.py:125
#, python-format
msgid "Launching VPN for %s"
msgstr ""
-#: nova/common/sqlalchemyutils.py:68
-msgid "Id not in sort_keys; is sort_keys unique?"
-msgstr ""
-
-#: nova/common/sqlalchemyutils.py:116
-msgid "Unknown sort direction, must be 'desc' or 'asc'"
-msgstr ""
-
-#: nova/compute/api.py:257
+#: nova/compute/api.py:264
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:264
+#: nova/compute/api.py:271
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:273
+#: nova/compute/api.py:280
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:293
+#: nova/compute/api.py:300
#, python-format
msgid ""
"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
"properties"
msgstr ""
-#: nova/compute/api.py:303
+#: nova/compute/api.py:310
msgid "Metadata property key blank"
msgstr ""
-#: nova/compute/api.py:307
+#: nova/compute/api.py:314
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:311
+#: nova/compute/api.py:318
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:543
+#: nova/compute/api.py:443
+msgid "Cannot attach one or more volumes to multiple instances"
+msgstr ""
+
+#: nova/compute/api.py:556
#, python-format
msgid "Going to run %s instances..."
msgstr ""
-#: nova/compute/api.py:615
+#: nova/compute/api.py:667
#, python-format
msgid "bdm %s"
msgstr ""
-#: nova/compute/api.py:642
+#: nova/compute/api.py:694
#, python-format
msgid "block_device_mapping %s"
msgstr ""
-#: nova/compute/api.py:878
+#: nova/compute/api.py:956
msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:973
-msgid "host for instance is down, deleting from database"
+#: nova/compute/api.py:1090
+#, python-format
+msgid "instance type %(old_inst_type_id)d not found"
+msgstr ""
+
+#: nova/compute/api.py:1096
+msgid "going to delete a resizing instance"
+msgstr ""
+
+#: nova/compute/api.py:1106
+#, python-format
+msgid "instance's host %s is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:1017
+#: nova/compute/api.py:1150
msgid "Going to try to soft delete instance"
msgstr ""
-#: nova/compute/api.py:1039
+#: nova/compute/api.py:1172
msgid "Going to try to terminate instance"
msgstr ""
-#: nova/compute/api.py:1086
+#: nova/compute/api.py:1221
msgid "Going to try to stop instance"
msgstr ""
-#: nova/compute/api.py:1100
+#: nova/compute/api.py:1237
msgid "Going to try to start instance"
msgstr ""
-#: nova/compute/api.py:1167
+#: nova/compute/api.py:1307
#, python-format
msgid "Searching by: %s"
msgstr ""
-#: nova/compute/api.py:1299
-#, python-format
-msgid "Image type not recognized %s"
-msgstr ""
-
-#: nova/compute/api.py:1408
+#: nova/compute/api.py:1551
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:1741
+#: nova/compute/api.py:1907
msgid "flavor_id is None. Assuming migration."
msgstr ""
-#: nova/compute/api.py:1750
+#: nova/compute/api.py:1916
#, python-format
msgid ""
"Old instance type %(current_instance_type_name)s, new instance type "
"%(new_instance_type_name)s"
msgstr ""
-#: nova/compute/api.py:1792
+#: nova/compute/api.py:1958
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-#: nova/compute/api.py:1976
+#: nova/compute/api.py:2194
msgid "Locking"
msgstr ""
-#: nova/compute/api.py:1984
+#: nova/compute/api.py:2202
msgid "Unlocking"
msgstr ""
-#: nova/compute/api.py:2052
+#: nova/compute/api.py:2270
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2137
+#: nova/compute/api.py:2361
#, python-format
msgid "Going to try to live migrate instance to %s"
msgstr ""
-#: nova/compute/api.py:2294
+#: nova/compute/api.py:2380
+msgid "vm evacuation scheduled"
+msgstr ""
+
+#: nova/compute/api.py:2384
+#, python-format
+msgid ""
+"Instance compute service state on %(host)s expected to be down, but it "
+"was up."
+msgstr ""
+
+#: nova/compute/api.py:2601
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:2298
+#: nova/compute/api.py:2605
msgid "Keypair name must be between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:2399
+#: nova/compute/api.py:2706
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:2402
+#: nova/compute/api.py:2709
#, python-format
msgid "Security group %s cannot be empty."
msgstr ""
-#: nova/compute/api.py:2410
+#: nova/compute/api.py:2717
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)'."
msgstr ""
-#: nova/compute/api.py:2416
+#: nova/compute/api.py:2723
#, python-format
msgid "Security group %s should not be greater than 255 characters."
msgstr ""
-#: nova/compute/api.py:2436
+#: nova/compute/api.py:2743
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:2439
+#: nova/compute/api.py:2746
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:2446
+#: nova/compute/api.py:2753
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:2511
+#: nova/compute/api.py:2818
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:2519
+#: nova/compute/api.py:2826
msgid "Failed to update usages deallocating security group"
msgstr ""
-#: nova/compute/api.py:2522
+#: nova/compute/api.py:2829
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:2779
+#: nova/compute/api.py:3087
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:2788
+#: nova/compute/api.py:3096
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:2791
+#: nova/compute/api.py:3099
#, python-format
msgid "Authorize security group ingress %s"
msgstr ""
-#: nova/compute/api.py:2802
+#: nova/compute/api.py:3110
#, python-format
msgid "Revoke security group ingress %s"
msgstr ""
@@ -3281,677 +3553,744 @@ msgid ""
"%(requested)d %(unit)s"
msgstr ""
-#: nova/compute/instance_types.py:65
+#: nova/compute/instance_types.py:92
msgid "names can only contain [a-zA-Z0-9_.- ]"
msgstr ""
-#: nova/compute/instance_types.py:74
+#: nova/compute/instance_types.py:101
#, python-format
msgid "'%s' argument must be a positive integer"
msgstr ""
-#: nova/compute/instance_types.py:82
+#: nova/compute/instance_types.py:109
msgid "'rxtx_factor' argument must be a positive float"
msgstr ""
-#: nova/compute/instance_types.py:90
+#: nova/compute/instance_types.py:117
#, python-format
msgid "'%s' argument must be greater than 0"
msgstr ""
-#: nova/compute/instance_types.py:100
+#: nova/compute/instance_types.py:127
msgid "is_public must be a boolean"
msgstr ""
-#: nova/compute/instance_types.py:107
+#: nova/compute/instance_types.py:134
#, python-format
msgid "DB error: %s"
msgstr ""
-#: nova/compute/instance_types.py:117
+#: nova/compute/instance_types.py:144
#, python-format
msgid "Instance type %s not found for deletion"
msgstr ""
-#: nova/compute/manager.py:199
+#: nova/compute/manager.py:192
msgid "Possibly task preempted."
msgstr ""
-#: nova/compute/manager.py:329
+#: nova/compute/manager.py:344
#, python-format
msgid "%(nodename)s is not a valid node managed by this compute host."
msgstr ""
-#: nova/compute/manager.py:358
+#: nova/compute/manager.py:373
msgid "Instance has been destroyed from under us while trying to set it to ERROR"
msgstr ""
-#: nova/compute/manager.py:377
+#: nova/compute/manager.py:392
+#, python-format
+msgid "Instance %(uuid)s found in the hypervisor, but not in the database"
+msgstr ""
+
+#: nova/compute/manager.py:410
+#, python-format
+msgid ""
+"Instance %(driver_instance)s found in the hypervisor, but not in the "
+"database"
+msgstr ""
+
+#: nova/compute/manager.py:431
+#, python-format
+msgid ""
+"Deleting instance as its host (%(instance_host)s) is not equal to our "
+"host (%(our_host)s)."
+msgstr ""
+
+#: nova/compute/manager.py:458
#, python-format
msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
msgstr ""
-#: nova/compute/manager.py:389
+#: nova/compute/manager.py:470
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:403
+#: nova/compute/manager.py:484
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:413
+#: nova/compute/manager.py:489
+msgid "Failed to resume instance"
+msgstr ""
+
+#: nova/compute/manager.py:499
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:447
+#: nova/compute/manager.py:533
msgid "Checking state"
msgstr ""
-#: nova/compute/manager.py:523
+#: nova/compute/manager.py:608
#, python-format
msgid "Setting up bdm %s"
msgstr ""
-#: nova/compute/manager.py:595 nova/compute/manager.py:1834
+#: nova/compute/manager.py:682 nova/compute/manager.py:1997
#, python-format
msgid "No node specified, defaulting to %(node)s"
msgstr ""
-#: nova/compute/manager.py:628
+#: nova/compute/manager.py:725
msgid "Failed to dealloc network for deleted instance"
msgstr ""
-#: nova/compute/manager.py:651
+#: nova/compute/manager.py:750
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:684 nova/compute/manager.py:1885
+#: nova/compute/manager.py:785 nova/compute/manager.py:2051
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:702
+#: nova/compute/manager.py:803
msgid "Retry info not present, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:707
+#: nova/compute/manager.py:808
msgid "No request spec, will not reschedule"
msgstr ""
-#: nova/compute/manager.py:713
+#: nova/compute/manager.py:814
#, python-format
msgid "Re-scheduling %(method)s: attempt %(num)d"
msgstr ""
-#: nova/compute/manager.py:741
+#: nova/compute/manager.py:842
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:771
-msgid "Instance has already been created"
-msgstr ""
-
-#: nova/compute/manager.py:817
+#: nova/compute/manager.py:918
#, python-format
msgid ""
"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
"allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:823
+#: nova/compute/manager.py:924
#, python-format
msgid ""
"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed "
"size %(allowed_size_bytes)d"
msgstr ""
-#: nova/compute/manager.py:833
+#: nova/compute/manager.py:934
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:854
+#: nova/compute/manager.py:956
msgid "Instance failed network setup"
msgstr ""
-#: nova/compute/manager.py:858
+#: nova/compute/manager.py:960
#, python-format
msgid "Instance network_info: |%s|"
msgstr ""
-#: nova/compute/manager.py:871
+#: nova/compute/manager.py:970
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:889
+#: nova/compute/manager.py:987
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:913
+#: nova/compute/manager.py:1011
msgid "Deallocating network for instance"
msgstr ""
-#: nova/compute/manager.py:985
+#: nova/compute/manager.py:1087
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:1016
+#: nova/compute/manager.py:1118
#, python-format
msgid "Ignoring DiskNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1019
+#: nova/compute/manager.py:1121
#, python-format
msgid "Ignoring VolumeNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:1026
+#: nova/compute/manager.py:1128
#, python-format
msgid "terminating bdm %s"
msgstr ""
-#: nova/compute/manager.py:1051
+#: nova/compute/manager.py:1153
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/manager.py:1090 nova/compute/manager.py:2053
-#: nova/compute/manager.py:3388
+#: nova/compute/manager.py:1192 nova/compute/manager.py:2230
+#: nova/compute/manager.py:3612
#, python-format
msgid "%s. Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:1224
+#: nova/compute/manager.py:1332
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:1311
+#: nova/compute/manager.py:1345
+msgid "Invalid state of instance files on shared storage"
+msgstr ""
+
+#: nova/compute/manager.py:1349
+msgid "disk on shared storage, recreating using existing disk"
+msgstr ""
+
+#: nova/compute/manager.py:1353
+#, python-format
+msgid "disk not on shared storagerebuilding from: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:1437
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:1461
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:1335
+#: nova/compute/manager.py:1480
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1344
+#: nova/compute/manager.py:1490
#, python-format
msgid "Cannot reboot instance: %(exc)s"
msgstr ""
-#: nova/compute/manager.py:1381
+#: nova/compute/manager.py:1526
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:1387
+#: nova/compute/manager.py:1532
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:1440
+#: nova/compute/manager.py:1593
#, python-format
msgid "Found %(num_images)d images (rotation: %(rotation)d)"
msgstr ""
-#: nova/compute/manager.py:1447
+#: nova/compute/manager.py:1600
#, python-format
msgid "Rotating out %d backups"
msgstr ""
-#: nova/compute/manager.py:1452
+#: nova/compute/manager.py:1605
#, python-format
msgid "Deleting image %s"
msgstr ""
-#: nova/compute/manager.py:1483
+#: nova/compute/manager.py:1633
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:1490
+#: nova/compute/manager.py:1640
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:1500
-msgid "set_admin_password is not implemented by this driver."
+#: nova/compute/manager.py:1647
+msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
-#: nova/compute/manager.py:1516
+#: nova/compute/manager.py:1662
#, python-format
msgid "set_admin_password failed: %s"
msgstr ""
-#: nova/compute/manager.py:1524
+#: nova/compute/manager.py:1669
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:1539
+#: nova/compute/manager.py:1682
#, python-format
msgid ""
"trying to inject a file into a non-running (state: "
"%(current_power_state)s expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:1543
+#: nova/compute/manager.py:1686
#, python-format
msgid "injecting file to %(path)s"
msgstr ""
-#: nova/compute/manager.py:1564
+#: nova/compute/manager.py:1706
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:1577
+#: nova/compute/manager.py:1720
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:1611
+#: nova/compute/manager.py:1755
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:1632
+#: nova/compute/manager.py:1776
#, python-format
msgid "Changing instance metadata according to %(diff)r"
msgstr ""
-#: nova/compute/manager.py:1801
+#: nova/compute/manager.py:1955
msgid "Instance has no source host"
msgstr ""
-#: nova/compute/manager.py:1807
+#: nova/compute/manager.py:1961
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:1816
+#: nova/compute/manager.py:1978
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:2050
+#: nova/compute/manager.py:2227
#, python-format
msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
msgstr ""
-#: nova/compute/manager.py:2106
+#: nova/compute/manager.py:2282
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:2123
+#: nova/compute/manager.py:2300
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:2161
+#: nova/compute/manager.py:2338
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:2191
+#: nova/compute/manager.py:2369
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:2213
+#: nova/compute/manager.py:2389
msgid "Reset network"
msgstr ""
-#: nova/compute/manager.py:2218
+#: nova/compute/manager.py:2394
msgid "Inject network info"
msgstr ""
-#: nova/compute/manager.py:2221
+#: nova/compute/manager.py:2397
#, python-format
msgid "network_info to inject: |%s|"
msgstr ""
-#: nova/compute/manager.py:2238
+#: nova/compute/manager.py:2414
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:2263
+#: nova/compute/manager.py:2439
msgid "Getting vnc console"
msgstr ""
-#: nova/compute/manager.py:2291
+#: nova/compute/manager.py:2467
+msgid "Getting spice console"
+msgstr ""
+
+#: nova/compute/manager.py:2497
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2336
+#: nova/compute/manager.py:2548
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2345
+#: nova/compute/manager.py:2557
#, python-format
msgid ""
"Failed to connect to volume %(volume_id)s while attaching at "
"%(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2360
+#: nova/compute/manager.py:2572
#, python-format
msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:2390
+#: nova/compute/manager.py:2602
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2394
+#: nova/compute/manager.py:2612
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:2407
+#: nova/compute/manager.py:2619
#, python-format
-msgid "Faild to detach volume %(volume_id)s from %(mp)s"
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:2431
+#: nova/compute/manager.py:2643
msgid "Updating volume usage cache with totals"
msgstr ""
-#: nova/compute/manager.py:2468
+#: nova/compute/manager.py:2680
#, python-format
msgid "Host %(host)s not found"
msgstr ""
-#: nova/compute/manager.py:2541
+#: nova/compute/manager.py:2753
msgid "Instance has no volume."
msgstr ""
-#: nova/compute/manager.py:2602
+#: nova/compute/manager.py:2814
#, python-format
msgid "Pre live migration failed at %(dest)s"
msgstr ""
-#: nova/compute/manager.py:2630
+#: nova/compute/manager.py:2842
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:2683
+#: nova/compute/manager.py:2895
#, python-format
msgid "Migrating instance to %(dest)s finished successfully."
msgstr ""
-#: nova/compute/manager.py:2685
+#: nova/compute/manager.py:2897
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:2699
+#: nova/compute/manager.py:2911
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:2842
+#: nova/compute/manager.py:3049
msgid "Updated the info_cache for instance"
msgstr ""
-#: nova/compute/manager.py:2887
+#: nova/compute/manager.py:3094
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:2893
+#: nova/compute/manager.py:3100
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:2902
+#: nova/compute/manager.py:3109
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:2909
+#: nova/compute/manager.py:3116
#, python-format
msgid "Instance %(instance_uuid)s not found"
msgstr ""
-#: nova/compute/manager.py:2913
+#: nova/compute/manager.py:3120
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:2920
+#: nova/compute/manager.py:3127
#, python-format
-msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
+msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr ""
-#: nova/compute/manager.py:2928
+#: nova/compute/manager.py:3135
#, python-format
msgid "Error auto-confirming resize: %(e)s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:2943
+#: nova/compute/manager.py:3152
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:2961
+#: nova/compute/manager.py:3171
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:2984
+#: nova/compute/manager.py:3195
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:3102
+#: nova/compute/manager.py:3313
msgid "Updating volume usage cache"
msgstr ""
-#: nova/compute/manager.py:3120
+#: nova/compute/manager.py:3331
msgid "Updating host status"
msgstr ""
-#: nova/compute/manager.py:3149
+#: nova/compute/manager.py:3360
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:3155 nova/compute/manager.py:3193
+#: nova/compute/manager.py:3366 nova/compute/manager.py:3404
msgid "During sync_power_state the instance has a pending task. Skip."
msgstr ""
-#: nova/compute/manager.py:3180
+#: nova/compute/manager.py:3391
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:3216
+#: nova/compute/manager.py:3427
msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3228 nova/compute/manager.py:3239
-#: nova/compute/manager.py:3253
+#: nova/compute/manager.py:3439 nova/compute/manager.py:3448
+#: nova/compute/manager.py:3478
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:3233
-msgid "Instance is paused or suspended unexpectedly. Calling the stop API."
+#: nova/compute/manager.py:3443
+msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3246
+#: nova/compute/manager.py:3459
+msgid "Instance is paused unexpectedly. Ignore."
+msgstr ""
+
+#: nova/compute/manager.py:3465
+msgid "Instance is unexpectedly not found. Ignore."
+msgstr ""
+
+#: nova/compute/manager.py:3471
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:3262
+#: nova/compute/manager.py:3487
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:3270
+#: nova/compute/manager.py:3495
msgid "CONF.reclaim_instance_interval <= 0, skipping..."
msgstr ""
-#: nova/compute/manager.py:3285
+#: nova/compute/manager.py:3510
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:3341
+#: nova/compute/manager.py:3565
#, python-format
msgid ""
"Detected instance with name label '%(name)s' which is marked as DELETED "
"but still present on host."
msgstr ""
-#: nova/compute/manager.py:3348
+#: nova/compute/manager.py:3572
#, python-format
msgid ""
"Destroying instance with name label '%(name)s' which is marked as DELETED"
" but still present on host."
msgstr ""
-#: nova/compute/manager.py:3355
+#: nova/compute/manager.py:3579
#, python-format
msgid "Unrecognized value '%(action)s' for CONF.running_deleted_instance_action"
msgstr ""
-#: nova/compute/resource_tracker.py:92
+#: nova/compute/resource_tracker.py:91
msgid ""
"Host field should not be set on the instance until resources have been "
"claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:97
+#: nova/compute/resource_tracker.py:96
msgid ""
"Node field should be not be set on the instance until resources have been"
" claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:244
+#: nova/compute/resource_tracker.py:236
msgid "Auditing locally available compute resources"
msgstr ""
-#: nova/compute/resource_tracker.py:248
+#: nova/compute/resource_tracker.py:240
msgid ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
msgstr ""
-#: nova/compute/resource_tracker.py:299
+#: nova/compute/resource_tracker.py:292
#, python-format
-msgid "Compute_service record created for %s "
+msgid "Compute_service record created for %(host)s:%(node)s"
msgstr ""
-#: nova/compute/resource_tracker.py:304
+#: nova/compute/resource_tracker.py:298
#, python-format
-msgid "Compute_service record updated for %s "
+msgid "Compute_service record updated for %(host)s:%(node)s"
msgstr ""
-#: nova/compute/resource_tracker.py:317
+#: nova/compute/resource_tracker.py:312
#, python-format
msgid "No service record for host %s"
msgstr ""
-#: nova/compute/resource_tracker.py:327
+#: nova/compute/resource_tracker.py:322
#, python-format
msgid "Hypervisor: free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:328
+#: nova/compute/resource_tracker.py:323
#, python-format
msgid "Hypervisor: free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:333
+#: nova/compute/resource_tracker.py:328
#, python-format
msgid "Hypervisor: free VCPUs: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:335
+#: nova/compute/resource_tracker.py:330
msgid "Hypervisor: VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:342
+#: nova/compute/resource_tracker.py:337
#, python-format
msgid "Free ram (MB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:343
+#: nova/compute/resource_tracker.py:338
#, python-format
msgid "Free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:348
+#: nova/compute/resource_tracker.py:343
#, python-format
msgid "Free VCPUS: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:350
+#: nova/compute/resource_tracker.py:345
msgid "Free VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:388
+#: nova/compute/resource_tracker.py:383
#, python-format
msgid "Updating from migration %s"
msgstr ""
-#: nova/compute/resource_tracker.py:446
+#: nova/compute/resource_tracker.py:439
msgid "Instance not resizing, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:538
+#: nova/compute/resource_tracker.py:453
+msgid "InstanceType could not be found, skipping migration."
+msgstr ""
+
+#: nova/compute/resource_tracker.py:537
#, python-format
msgid ""
"Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB "
"memory"
msgstr ""
-#: nova/compute/resource_tracker.py:551
+#: nova/compute/resource_tracker.py:550
#, python-format
msgid "Missing keys: %s"
msgstr ""
-#: nova/compute/rpcapi.py:44
+#: nova/compute/rpcapi.py:50
msgid "No compute host specified"
msgstr ""
-#: nova/compute/rpcapi.py:47
+#: nova/compute/rpcapi.py:53
#, python-format
msgid "Unable to find host for Instance %s"
msgstr ""
-#: nova/compute/utils.py:101
+#: nova/compute/utils.py:139
#, python-format
msgid "Using %(prefix)s instead of %(req_prefix)s"
msgstr ""
-#: nova/conductor/manager.py:62
+#: nova/conductor/api.py:338
+msgid ""
+"Timed out waiting for nova-conductor. Is it running? Or did this service "
+"start before nova-conductor?"
+msgstr ""
+
+#: nova/conductor/manager.py:67
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
-#: nova/conductor/manager.py:198
+#: nova/conductor/manager.py:227
msgid "Invalid block_device_mapping_destroy invocation"
msgstr ""
-#: nova/console/manager.py:80 nova/console/vmrc_manager.py:62
+#: nova/console/manager.py:79 nova/console/vmrc_manager.py:62
msgid "Adding console"
msgstr ""
-#: nova/console/manager.py:101 nova/console/vmrc_manager.py:114
+#: nova/console/manager.py:99 nova/console/vmrc_manager.py:112
#, python-format
msgid "Tried to remove non-existent console %(console_id)s."
msgstr ""
-#: nova/console/vmrc_manager.py:117
+#: nova/console/vmrc_manager.py:115
#, python-format
msgid "Removing console %(console_id)s."
msgstr ""
+#: nova/console/websocketproxy.py:54
+msgid "Invalid Token"
+msgstr ""
+
+#: nova/console/websocketproxy.py:73
+msgid "Invalid Connection Info"
+msgstr ""
+
#: nova/console/xvp.py:99
msgid "Rebuilding xvp conf"
msgstr ""
@@ -3990,81 +4329,67 @@ msgstr ""
msgid "Failed to run xvp."
msgstr ""
-#: nova/consoleauth/manager.py:70
+#: nova/consoleauth/manager.py:64
#, python-format
msgid "Received Token: %(token)s, %(token_dict)s)"
msgstr ""
-#: nova/consoleauth/manager.py:75
+#: nova/consoleauth/manager.py:69
#, python-format
msgid "Checking Token: %(token)s, %(token_valid)s)"
msgstr ""
-#: nova/db/api.py:580
+#: nova/db/api.py:582
msgid "Failed to notify cells of instance destroy"
msgstr ""
-#: nova/db/api.py:689 nova/db/api.py:710
+#: nova/db/api.py:680 nova/db/api.py:701
msgid "Failed to notify cells of instance update"
msgstr ""
-#: nova/db/api.py:749
+#: nova/db/api.py:741
msgid "Failed to notify cells of instance info cache update"
msgstr ""
-#: nova/db/api.py:1458
+#: nova/db/api.py:1452
msgid "Failed to notify cells of bw_usage update"
msgstr ""
-#: nova/db/api.py:1602
+#: nova/db/api.py:1606
msgid "Failed to notify cells of instance fault"
msgstr ""
-#: nova/db/sqlalchemy/api.py:182 nova/virt/baremetal/db/sqlalchemy/api.py:61
+#: nova/db/sqlalchemy/api.py:194
+msgid "model or base_model parameter should be subclass of NovaBase"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:207 nova/virt/baremetal/db/sqlalchemy/api.py:60
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1374
+#: nova/db/sqlalchemy/api.py:1389
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:2736
+#: nova/db/sqlalchemy/api.py:2738
#, python-format
msgid "Change will make usage less than 0 for the following resources: %(unders)s"
msgstr ""
-#: nova/db/sqlalchemy/migration.py:71
+#: nova/db/sqlalchemy/migration.py:73
#: nova/virt/baremetal/db/sqlalchemy/migration.py:69
msgid "version should be an integer"
msgstr ""
-#: nova/db/sqlalchemy/migration.py:98
-#: nova/virt/baremetal/db/sqlalchemy/migration.py:96
+#: nova/db/sqlalchemy/migration.py:100
+#: nova/virt/baremetal/db/sqlalchemy/migration.py:97
msgid "Upgrade DB using Essex release first."
msgstr ""
-#: nova/db/sqlalchemy/session.py:384
-msgid "DB exception wrapped."
-msgstr ""
-
-#: nova/db/sqlalchemy/session.py:434
-#, python-format
-msgid "Got mysql server has gone away: %s"
-msgstr ""
-
-#: nova/db/sqlalchemy/session.py:475
-msgid "Using mysql/eventlet db_pool."
-msgstr ""
-
-#: nova/db/sqlalchemy/session.py:519
-#, python-format
-msgid "SQL connection failed. %s attempts left."
-msgstr ""
-
#: nova/db/sqlalchemy/migrate_repo/versions/133_folsom.py:62
msgid "Exception while seeding instance_types table"
msgstr ""
@@ -4081,49 +4406,54 @@ msgstr ""
msgid "volume_usage_cache table not dropped"
msgstr ""
-#: nova/image/glance.py:159
+#: nova/image/glance.py:187
#, python-format
msgid ""
"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
"%(extra)s."
msgstr ""
-#: nova/image/s3.py:312
+#: nova/image/glance.py:492
+#, python-format
+msgid "fetching image %s from glance"
+msgstr ""
+
+#: nova/image/s3.py:318
#, python-format
msgid "Failed to download %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:329
+#: nova/image/s3.py:335
#, python-format
msgid "Failed to decrypt %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:339
+#: nova/image/s3.py:345
#, python-format
msgid "Failed to untar %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:349
+#: nova/image/s3.py:355
#, python-format
msgid "Failed to upload %(image_location)s to %(image_path)s"
msgstr ""
-#: nova/image/s3.py:373
+#: nova/image/s3.py:379
#, python-format
msgid "Failed to decrypt private key: %s"
msgstr ""
-#: nova/image/s3.py:380
+#: nova/image/s3.py:386
#, python-format
msgid "Failed to decrypt initialization vector: %s"
msgstr ""
-#: nova/image/s3.py:391
+#: nova/image/s3.py:397
#, python-format
msgid "Failed to decrypt image file %(image_file)s: %(err)s"
msgstr ""
-#: nova/image/s3.py:403
+#: nova/image/s3.py:409
msgid "Unsafe filenames in image"
msgstr ""
@@ -4142,25 +4472,15 @@ msgstr ""
msgid "Bad project_id for to_global_ipv6: %s"
msgstr ""
-#: nova/network/api.py:50
+#: nova/network/api.py:53 nova/network/api_deprecated.py:56
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
-#: nova/network/api.py:76
+#: nova/network/api.py:78 nova/network/api_deprecated.py:79
msgid "Failed storing info cache"
msgstr ""
-#: nova/network/api.py:77
-#, python-format
-msgid "args: %s"
-msgstr ""
-
-#: nova/network/api.py:78
-#, python-format
-msgid "kwargs: %s"
-msgstr ""
-
-#: nova/network/api.py:171
+#: nova/network/api.py:226 nova/network/api_deprecated.py:216
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr ""
@@ -4174,245 +4494,245 @@ msgstr ""
msgid "Loading network driver '%s'"
msgstr ""
-#: nova/network/ldapdns.py:98
+#: nova/network/floating_ips.py:85
#, python-format
-msgid ""
-"Found multiple matches for domain %(domain)s.\n"
-"%(entry)s"
+msgid "Fixed ip %(fixed_ip_id)s not found"
msgstr ""
-#: nova/network/ldapdns.py:125
+#: nova/network/floating_ips.py:95 nova/network/floating_ips.py:368
#, python-format
-msgid "Unable to dequalify. %(name)s is not in %(domain)s.\n"
+msgid "Interface %(interface)s not found"
msgstr ""
-#: nova/network/ldapdns.py:320
-msgid "This driver only supports type 'a' entries."
+#: nova/network/floating_ips.py:118
+#, python-format
+msgid "floating IP allocation for instance |%(floating_address)s|"
msgstr ""
-#: nova/network/ldapdns.py:363 nova/network/minidns.py:169
-msgid "This shouldn't be getting called except during testing."
+#: nova/network/floating_ips.py:173
+msgid "Floating IP is not associated. Ignore."
msgstr ""
-#: nova/network/linux_net.py:190
+#: nova/network/floating_ips.py:191
#, python-format
-msgid "Attempted to remove chain %s which does not exist"
+msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/linux_net.py:225
+#: nova/network/floating_ips.py:195
#, python-format
-msgid "Unknown chain: %r"
+msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/linux_net.py:250
+#: nova/network/floating_ips.py:215
#, python-format
-msgid ""
-"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
-"%(top)r"
+msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
msgstr ""
-#: nova/network/linux_net.py:387
-msgid "IPTablesManager.apply completed with success"
+#: nova/network/floating_ips.py:276
+msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/linux_net.py:593
+#: nova/network/floating_ips.py:525
#, python-format
-msgid "arping error for ip %s"
+msgid "Starting migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/linux_net.py:849
+#: nova/network/floating_ips.py:532
#, python-format
-msgid "Pid %d is stale, skip killing dnsmasq"
+msgid ""
+"Floating ip address |%(address)s| no longer belongs to instance "
+"%(instance_uuid)s. Will notmigrate it "
msgstr ""
-#: nova/network/linux_net.py:894
+#: nova/network/floating_ips.py:563
#, python-format
-msgid "Hupping dnsmasq threw %s"
+msgid "Finishing migration network for instance %(instance_uuid)s"
msgstr ""
-#: nova/network/linux_net.py:896
+#: nova/network/floating_ips.py:571
#, python-format
-msgid "Pid %d is stale, relaunching dnsmasq"
+msgid ""
+"Floating ip address |%(address)s| no longer belongs to instance "
+"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/linux_net.py:961
+#: nova/network/floating_ips.py:619
#, python-format
-msgid "killing radvd threw %s"
+msgid ""
+"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
+"not visible to either the floating or instance DNS driver. It will be "
+"ignored."
msgstr ""
-#: nova/network/linux_net.py:963
+#: nova/network/floating_ips.py:659
#, python-format
-msgid "Pid %d is stale, relaunching radvd"
+msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/linux_net.py:1100
+#: nova/network/floating_ips.py:668
#, python-format
-msgid "Error clearing stale veth %s"
+msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
-#: nova/network/linux_net.py:1223
+#: nova/network/ldapdns.py:99
#, python-format
-msgid "Starting VLAN inteface %s"
+msgid ""
+"Found multiple matches for domain %(domain)s.\n"
+"%(entry)s"
msgstr ""
-#: nova/network/linux_net.py:1254
+#: nova/network/ldapdns.py:126
#, python-format
-msgid "Failed unplugging VLAN interface '%s'"
+msgid "Unable to dequalify. %(name)s is not in %(domain)s.\n"
msgstr ""
-#: nova/network/linux_net.py:1257
-#, python-format
-msgid "Unplugged VLAN interface '%s'"
+#: nova/network/ldapdns.py:321
+msgid "This driver only supports type 'a' entries."
msgstr ""
-#: nova/network/linux_net.py:1279
-#, python-format
-msgid "Starting Bridge %s"
+#: nova/network/ldapdns.py:364 nova/network/minidns.py:171
+msgid "This shouldn't be getting called except during testing."
msgstr ""
-#: nova/network/linux_net.py:1291
+#: nova/network/linux_net.py:194
#, python-format
-msgid "Adding interface %(interface)s to bridge %(bridge)s"
+msgid "Attempted to remove chain %s which does not exist"
msgstr ""
-#: nova/network/linux_net.py:1324
+#: nova/network/linux_net.py:229
#, python-format
-msgid "Failed to add interface: %s"
+msgid "Unknown chain: %r"
msgstr ""
-#: nova/network/linux_net.py:1364
+#: nova/network/linux_net.py:254
#, python-format
-msgid "Failed unplugging bridge interface '%s'"
+msgid ""
+"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
+"%(top)r"
msgstr ""
-#: nova/network/linux_net.py:1367
-#, python-format
-msgid "Unplugged bridge interface '%s'"
+#: nova/network/linux_net.py:391
+msgid "IPTablesManager.apply completed with success"
msgstr ""
-#: nova/network/linux_net.py:1522
+#: nova/network/linux_net.py:619
#, python-format
-msgid "Starting bridge %s "
+msgid "arping error for ip %s"
msgstr ""
-#: nova/network/linux_net.py:1530
+#: nova/network/linux_net.py:896
#, python-format
-msgid "Done starting bridge %s"
+msgid "Pid %d is stale, skip killing dnsmasq"
msgstr ""
-#: nova/network/linux_net.py:1549
+#: nova/network/linux_net.py:941
#, python-format
-msgid "Failed unplugging gateway interface '%s'"
+msgid "Hupping dnsmasq threw %s"
msgstr ""
-#: nova/network/linux_net.py:1551
+#: nova/network/linux_net.py:943
#, python-format
-msgid "Unplugged gateway interface '%s'"
+msgid "Pid %d is stale, relaunching dnsmasq"
msgstr ""
-#: nova/network/manager.py:326
+#: nova/network/linux_net.py:1008
#, python-format
-msgid "Fixed ip %(fixed_ip_id)s not found"
+msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/manager.py:335 nova/network/manager.py:606
+#: nova/network/linux_net.py:1010
#, python-format
-msgid "Interface %(interface)s not found"
+msgid "Pid %d is stale, relaunching radvd"
msgstr ""
-#: nova/network/manager.py:350
+#: nova/network/linux_net.py:1142
#, python-format
-msgid "floating IP allocation for instance |%s|"
+msgid "Error clearing stale veth %s"
msgstr ""
-#: nova/network/manager.py:414
-msgid "Floating IP is not associated. Ignore."
+#: nova/network/linux_net.py:1301
+#, python-format
+msgid "Starting VLAN inteface %s"
msgstr ""
-#: nova/network/manager.py:432
+#: nova/network/linux_net.py:1332
#, python-format
-msgid "Address |%(address)s| is not allocated"
+msgid "Failed unplugging VLAN interface '%s'"
msgstr ""
-#: nova/network/manager.py:436
+#: nova/network/linux_net.py:1335
#, python-format
-msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
+msgid "Unplugged VLAN interface '%s'"
msgstr ""
-#: nova/network/manager.py:457
+#: nova/network/linux_net.py:1357
#, python-format
-msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
+msgid "Starting Bridge %s"
msgstr ""
-#: nova/network/manager.py:519
-msgid "Failed to update usages deallocating floating IP"
+#: nova/network/linux_net.py:1369
+#, python-format
+msgid "Adding interface %(interface)s to bridge %(bridge)s"
msgstr ""
-#: nova/network/manager.py:753
+#: nova/network/linux_net.py:1402
#, python-format
-msgid "Starting migration network for instance %(instance_uuid)s"
+msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/manager.py:760
+#: nova/network/linux_net.py:1438
#, python-format
-msgid ""
-"Floating ip address |%(address)s| no longer belongs to instance "
-"%(instance_uuid)s. Will notmigrate it "
+msgid "Failed unplugging bridge interface '%s'"
msgstr ""
-#: nova/network/manager.py:790
+#: nova/network/linux_net.py:1441
#, python-format
-msgid "Finishing migration network for instance %(instance_uuid)s"
+msgid "Unplugged bridge interface '%s'"
msgstr ""
-#: nova/network/manager.py:798
+#: nova/network/linux_net.py:1610
#, python-format
-msgid ""
-"Floating ip address |%(address)s| no longer belongs to instance "
-"%(instance_uuid)s. Will notsetup it."
+msgid "Starting bridge %s "
msgstr ""
-#: nova/network/manager.py:845
+#: nova/network/linux_net.py:1618
#, python-format
-msgid ""
-"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
-"not visible to either the floating or instance DNS driver. It will be "
-"ignored."
+msgid "Done starting bridge %s"
msgstr ""
-#: nova/network/manager.py:891
+#: nova/network/linux_net.py:1637
#, python-format
-msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
+msgid "Failed unplugging gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:901
+#: nova/network/linux_net.py:1639
#, python-format
-msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgid "Unplugged gateway interface '%s'"
msgstr ""
-#: nova/network/manager.py:1017
+#: nova/network/manager.py:354
#, python-format
msgid "Disassociated %s stale fixed ip(s)"
msgstr ""
-#: nova/network/manager.py:1021
+#: nova/network/manager.py:358
msgid "setting network host"
msgstr ""
-#: nova/network/manager.py:1148
+#: nova/network/manager.py:484
msgid "network allocations"
msgstr ""
-#: nova/network/manager.py:1153
+#: nova/network/manager.py:491
#, python-format
-msgid "networks retrieved for instance: |%(networks)s|"
+msgid "networks retrieved for instance: |%(networks_list)s|"
msgstr ""
-#: nova/network/manager.py:1189
+#: nova/network/manager.py:536
msgid "network deallocation for instance"
msgstr ""
-#: nova/network/manager.py:1419
+#: nova/network/manager.py:782
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -4420,201 +4740,211 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:1507
+#: nova/network/manager.py:870
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:1526
+#: nova/network/manager.py:889
#, python-format
msgid "Leased IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1530
+#: nova/network/manager.py:893
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:1538
+#: nova/network/manager.py:901
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:1543
+#: nova/network/manager.py:906
#, python-format
msgid "Released IP |%(address)s|"
msgstr ""
-#: nova/network/manager.py:1547
+#: nova/network/manager.py:910
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:1550
+#: nova/network/manager.py:913
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:1569
+#: nova/network/manager.py:932
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:1593
+#: nova/network/manager.py:956
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1613
+#: nova/network/manager.py:976
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1694
+#: nova/network/manager.py:1057
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1697
+#: nova/network/manager.py:1060
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1708
+#: nova/network/manager.py:1071
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
"(%(smaller)s)"
msgstr ""
-#: nova/network/manager.py:1765
+#: nova/network/manager.py:1128
msgid "Network already exists!"
msgstr ""
-#: nova/network/manager.py:1785
+#: nova/network/manager.py:1147
#, python-format
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:2308
+#: nova/network/manager.py:1686
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:2315
+#: nova/network/manager.py:1693
#, python-format
msgid ""
-"The network range is not big enough to fit %(num_networks)s. Network size"
-" is %(network_size)s"
+"The network range is not big enough to fit %(num_networks)s networks. "
+"Network size is %(network_size)s"
msgstr ""
-#: nova/network/minidns.py:46
+#: nova/network/minidns.py:48
#, python-format
msgid "minidns file is |%s|"
msgstr ""
-#: nova/network/minidns.py:73 nova/network/minidns.py:104
+#: nova/network/minidns.py:75 nova/network/minidns.py:106
msgid "Invalid name"
msgstr ""
-#: nova/network/minidns.py:76
+#: nova/network/minidns.py:78
msgid "This driver only supports type 'a'"
msgstr ""
-#: nova/network/minidns.py:120
+#: nova/network/minidns.py:122
#, python-format
msgid "Cannot delete entry |%s|"
msgstr ""
-#: nova/network/minidns.py:206
+#: nova/network/minidns.py:208
#, python-format
msgid "Cannot delete domain |%s|"
msgstr ""
-#: nova/network/model.py:339
+#: nova/network/model.py:375
msgid "v4 subnets are required for legacy nw_info"
msgstr ""
-#: nova/network/quantumv2/__init__.py:40
+#: nova/network/quantumv2/__init__.py:42
msgid "_get_auth_token() failed"
msgstr ""
-#: nova/network/quantumv2/api.py:102
+#: nova/network/quantumv2/api.py:134
#, python-format
msgid "allocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:105
+#: nova/network/quantumv2/api.py:137
#, python-format
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/quantumv2/api.py:151
+#: nova/network/quantumv2/api.py:198
msgid "Port not found"
msgstr ""
-#: nova/network/quantumv2/api.py:159
+#: nova/network/quantumv2/api.py:206
#, python-format
msgid "Fail to delete port %(portid)s with failure: %(exception)s"
msgstr ""
-#: nova/network/quantumv2/api.py:171
+#: nova/network/quantumv2/api.py:219
#, python-format
msgid "deallocate_for_instance() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:180
+#: nova/network/quantumv2/api.py:228
#, python-format
msgid "Failed to delete quantum port %(portid)s "
msgstr ""
-#: nova/network/quantumv2/api.py:190
+#: nova/network/quantumv2/api.py:241
#, python-format
msgid "get_instance_nw_info() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:205
+#: nova/network/quantumv2/api.py:272 nova/network/quantumv2/api.py:299
+#, python-format
+msgid "Unable to update port %(portid)s with failure: %(exception)s"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:309
#, python-format
msgid "validate_networks() for %s"
msgstr ""
-#: nova/network/quantumv2/api.py:459
+#: nova/network/quantumv2/api.py:561
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
-#: nova/openstack/common/lockutils.py:97
+#: nova/openstack/common/lockutils.py:98
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/lockutils.py:183
+#: nova/openstack/common/lockutils.py:184
#, python-format
msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:187
+#: nova/openstack/common/lockutils.py:195
#, python-format
msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/lockutils.py:215
+#: nova/openstack/common/lockutils.py:223
#, python-format
msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/log.py:168
+#: nova/openstack/common/lockutils.py:231
#, python-format
-msgid "Deprecated Config: %s"
+msgid "Released file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
msgstr ""
-#: nova/openstack/common/log.py:300
+#: nova/openstack/common/log.py:224
+#, python-format
+msgid "Deprecated: %s"
+msgstr ""
+
+#: nova/openstack/common/log.py:362
#, python-format
msgid "syslog facility must be one of: %s"
msgstr ""
-#: nova/openstack/common/log.py:458
+#: nova/openstack/common/log.py:522
#, python-format
msgid "Fatal call to deprecated config: %(msg)s"
msgstr ""
@@ -4634,6 +4964,32 @@ msgstr ""
msgid "Failed to understand rule %(rule)r"
msgstr ""
+#: nova/openstack/common/db/sqlalchemy/session.py:431
+msgid "DB exception wrapped."
+msgstr ""
+
+#: nova/openstack/common/db/sqlalchemy/session.py:481
+#, python-format
+msgid "Got mysql server has gone away: %s"
+msgstr ""
+
+#: nova/openstack/common/db/sqlalchemy/session.py:522
+msgid "Using mysql/eventlet db_pool."
+msgstr ""
+
+#: nova/openstack/common/db/sqlalchemy/session.py:578
+#, python-format
+msgid "SQL connection failed. %s attempts left."
+msgstr ""
+
+#: nova/openstack/common/db/sqlalchemy/utils.py:72
+msgid "Id not in sort_keys; is sort_keys unique?"
+msgstr ""
+
+#: nova/openstack/common/db/sqlalchemy/utils.py:120
+msgid "Unknown sort direction, must be 'desc' or 'asc'"
+msgstr ""
+
#: nova/openstack/common/notifier/api.py:125
#, python-format
msgid "%s not in valid priorities"
@@ -4646,7 +5002,7 @@ msgid ""
"Payload=%(payload)s"
msgstr ""
-#: nova/openstack/common/notifier/api.py:170
+#: nova/openstack/common/notifier/api.py:171
#, python-format
msgid "Failed to load notifier %s. These notifications will not be sent."
msgstr ""
@@ -4656,6 +5012,7 @@ msgid "The rabbit_notifier is now deprecated. Please use rpc_notifier instead."
msgstr ""
#: nova/openstack/common/notifier/rpc_notifier.py:45
+#: nova/openstack/common/notifier/rpc_notifier2.py:50
#, python-format
msgid "Could not send notification to %(topic)s. Payload=%(message)s"
msgstr ""
@@ -4665,325 +5022,356 @@ msgstr ""
msgid "Failed to load plugin %(plug)s: %(exc)s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:58
+#: nova/openstack/common/rpc/__init__.py:105
+#, python-format
+msgid ""
+"A RPC is being made while holding a lock. The locks currently held are "
+"%(locks)s. This is probably a bug. Please report it. Include the "
+"following: [%(stack)s]."
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:57
msgid "Pool creating new connection"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:210
+#: nova/openstack/common/rpc/amqp.py:209
#, python-format
msgid "unpacked context: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:254
+#: nova/openstack/common/rpc/amqp.py:253
#, python-format
msgid "received %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:260
+#: nova/openstack/common/rpc/amqp.py:259
#, python-format
msgid "no method for message: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:261
+#: nova/openstack/common/rpc/amqp.py:260
#, python-format
msgid "No method for message: %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:287
-#: nova/openstack/common/rpc/impl_zmq.py:263
+#: nova/openstack/common/rpc/amqp.py:286
+#: nova/openstack/common/rpc/impl_zmq.py:277
#, python-format
msgid "Expected exception during message handling (%s)"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:293
-#: nova/openstack/common/rpc/impl_zmq.py:269
+#: nova/openstack/common/rpc/amqp.py:292
+#: nova/openstack/common/rpc/impl_zmq.py:283
msgid "Exception during message handling"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:359
+#: nova/openstack/common/rpc/amqp.py:362
#, python-format
-msgid "Making asynchronous call on %s ..."
+msgid "Making synchronous call on %s ..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:362
+#: nova/openstack/common/rpc/amqp.py:365
#, python-format
msgid "MSG_ID is %s"
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:384
+#: nova/openstack/common/rpc/amqp.py:387
#, python-format
msgid "Making asynchronous cast on %s..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:392
+#: nova/openstack/common/rpc/amqp.py:395
msgid "Making asynchronous fanout cast..."
msgstr ""
-#: nova/openstack/common/rpc/amqp.py:417
+#: nova/openstack/common/rpc/amqp.py:420
#, python-format
msgid "Sending %(event_type)s on %(topic)s"
msgstr ""
-#: nova/openstack/common/rpc/common.py:35
+#: nova/openstack/common/rpc/common.py:77
msgid "An unknown RPC related exception occurred."
msgstr ""
-#: nova/openstack/common/rpc/common.py:65
+#: nova/openstack/common/rpc/common.py:107
#, python-format
msgid ""
"Remote error: %(exc_type)s %(value)s\n"
"%(traceback)s."
msgstr ""
-#: nova/openstack/common/rpc/common.py:82
+#: nova/openstack/common/rpc/common.py:124
msgid "Timeout while waiting on RPC response."
msgstr ""
-#: nova/openstack/common/rpc/common.py:86
+#: nova/openstack/common/rpc/common.py:128
msgid "Invalid reuse of an RPC connection."
msgstr ""
-#: nova/openstack/common/rpc/common.py:90
+#: nova/openstack/common/rpc/common.py:132
#, python-format
msgid "Specified RPC version, %(version)s, not supported by this endpoint."
msgstr ""
-#: nova/openstack/common/rpc/common.py:208
+#: nova/openstack/common/rpc/common.py:137
+#, python-format
+msgid ""
+"Specified RPC envelope version, %(version)s, not supported by this "
+"endpoint."
+msgstr ""
+
+#: nova/openstack/common/rpc/common.py:239
+#, python-format
+msgid "Failed to sanitize %(item)s. Key error %(err)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/common.py:261
#, python-format
msgid "Returning exception %s to caller"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:168
-#: nova/openstack/common/rpc/impl_qpid.py:129
+#: nova/openstack/common/rpc/impl_kombu.py:170
+#: nova/openstack/common/rpc/impl_qpid.py:133
msgid "Failed to process message... skipping it."
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:469
+#: nova/openstack/common/rpc/impl_kombu.py:478
#, python-format
msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:491
+#: nova/openstack/common/rpc/impl_kombu.py:500
#, python-format
msgid "Connected to AMQP server on %(hostname)s:%(port)d"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:528
+#: nova/openstack/common/rpc/impl_kombu.py:537
#, python-format
msgid ""
"Unable to connect to AMQP server on %(hostname)s:%(port)d after "
"%(max_retries)d tries: %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:544
+#: nova/openstack/common/rpc/impl_kombu.py:553
#, python-format
msgid ""
"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying "
"again in %(sleep_time)d seconds."
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:596
-#: nova/openstack/common/rpc/impl_qpid.py:386
+#: nova/openstack/common/rpc/impl_kombu.py:607
+#: nova/openstack/common/rpc/impl_qpid.py:403
#, python-format
msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:614
-#: nova/openstack/common/rpc/impl_qpid.py:401
+#: nova/openstack/common/rpc/impl_kombu.py:625
+#: nova/openstack/common/rpc/impl_qpid.py:418
#, python-format
msgid "Timed out waiting for RPC response: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:618
-#: nova/openstack/common/rpc/impl_qpid.py:405
+#: nova/openstack/common/rpc/impl_kombu.py:629
+#: nova/openstack/common/rpc/impl_qpid.py:422
#, python-format
msgid "Failed to consume message from queue: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_kombu.py:652
-#: nova/openstack/common/rpc/impl_qpid.py:435
+#: nova/openstack/common/rpc/impl_kombu.py:668
+#: nova/openstack/common/rpc/impl_qpid.py:457
#, python-format
msgid "Failed to publish message to topic '%(topic)s': %(err_str)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:336
+#: nova/openstack/common/rpc/impl_qpid.py:351
#, python-format
msgid "Unable to connect to AMQP server: %(e)s. Sleeping %(delay)s seconds"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:342
+#: nova/openstack/common/rpc/impl_qpid.py:357
#, python-format
msgid "Connected to AMQP server on %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:355
+#: nova/openstack/common/rpc/impl_qpid.py:370
msgid "Re-established AMQP queues"
msgstr ""
-#: nova/openstack/common/rpc/impl_qpid.py:413
+#: nova/openstack/common/rpc/impl_qpid.py:430
msgid "Error processing message. Skipping it."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:89
+#: nova/openstack/common/rpc/impl_zmq.py:94
msgid "JSON serialization failed."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:97
+#: nova/openstack/common/rpc/impl_zmq.py:102
#, python-format
msgid "Deserializing: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:133
+#: nova/openstack/common/rpc/impl_zmq.py:138
#, python-format
msgid "Connecting to %(addr)s with %(type)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:134
+#: nova/openstack/common/rpc/impl_zmq.py:139
#, python-format
msgid "-> Subscribed to %(subscribe)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:135
+#: nova/openstack/common/rpc/impl_zmq.py:140
#, python-format
msgid "-> bind: %(bind)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:143
+#: nova/openstack/common/rpc/impl_zmq.py:148
msgid "Could not open socket."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:155
+#: nova/openstack/common/rpc/impl_zmq.py:160
#, python-format
msgid "Subscribing to %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:193
+#: nova/openstack/common/rpc/impl_zmq.py:202
msgid "You cannot recv on this socket."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:198
+#: nova/openstack/common/rpc/impl_zmq.py:207
msgid "You cannot send on this socket."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:251
+#: nova/openstack/common/rpc/impl_zmq.py:265
#, python-format
msgid "Running func with context: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:283
+#: nova/openstack/common/rpc/impl_zmq.py:297
msgid "Sending reply"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:351
+#: nova/openstack/common/rpc/impl_zmq.py:331
+msgid "RPC message did not include method."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:366
msgid "Registering reactor"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:363
+#: nova/openstack/common/rpc/impl_zmq.py:378
msgid "In reactor registered"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:378
+#: nova/openstack/common/rpc/impl_zmq.py:393
msgid "Out reactor registered"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:382
+#: nova/openstack/common/rpc/impl_zmq.py:397
msgid "Consuming socket"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:429
+#: nova/openstack/common/rpc/impl_zmq.py:438
#, python-format
msgid "CONSUMER GOT %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:439
+#: nova/openstack/common/rpc/impl_zmq.py:447
+#, python-format
+msgid "Creating proxy for topic: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:471
#, python-format
-msgid "->response->%s"
+msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:480
+msgid "Topic socket file creation failed."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:449
+#: nova/openstack/common/rpc/impl_zmq.py:485
#, python-format
-msgid "Created topic proxy: %s"
+msgid "ROUTER RELAY-OUT QUEUED %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:456
+#: nova/openstack/common/rpc/impl_zmq.py:488
#, python-format
-msgid "ROUTER RELAY-OUT START %(data)s"
+msgid "Local per-topic backlog buffer full for topic %(topic)s. Dropping message."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:458
+#: nova/openstack/common/rpc/impl_zmq.py:506
#, python-format
-msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s"
+msgid "Could not create IPC directory %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:474
+#: nova/openstack/common/rpc/impl_zmq.py:516
+msgid "Could not create ZeroMQ receiver daemon. Socket may already be in use."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:536
#, python-format
msgid "CONSUMER RECEIVED DATA: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:476
+#: nova/openstack/common/rpc/impl_zmq.py:538
#, python-format
msgid "ROUTER RELAY-OUT %(data)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:502
+#: nova/openstack/common/rpc/impl_zmq.py:564
#, python-format
msgid "Create Consumer for topic (%(topic)s)"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:518
+#: nova/openstack/common/rpc/impl_zmq.py:580
#, python-format
msgid "Consumer is a zmq.%s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:561
+#: nova/openstack/common/rpc/impl_zmq.py:625
msgid "Creating payload"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:574
+#: nova/openstack/common/rpc/impl_zmq.py:638
msgid "Creating queue socket for reply waiter"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:585
+#: nova/openstack/common/rpc/impl_zmq.py:649
msgid "Sending cast"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:588
+#: nova/openstack/common/rpc/impl_zmq.py:653
msgid "Cast sent; Waiting reply"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:591
+#: nova/openstack/common/rpc/impl_zmq.py:656
#, python-format
msgid "Received message: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:592
+#: nova/openstack/common/rpc/impl_zmq.py:657
msgid "Unpacking response"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:619
+#: nova/openstack/common/rpc/impl_zmq.py:663
+msgid "RPC Message Invalid."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:687
#, python-format
msgid "%(msg)s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:622
+#: nova/openstack/common/rpc/impl_zmq.py:690
#, python-format
msgid "Sending message(s) to: %s"
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:626
+#: nova/openstack/common/rpc/impl_zmq.py:694
msgid "No matchmaker results. Not casting."
msgstr ""
-#: nova/openstack/common/rpc/impl_zmq.py:716
-msgid ""
-"Matchmaker could not be loaded.\n"
-"rpc_zmq_matchmaker is not a class."
-msgstr ""
-
-#: nova/openstack/common/rpc/impl_zmq.py:718
-msgid "Error loading Matchmaker."
-msgstr ""
-
#: nova/openstack/common/rpc/matchmaker.py:45
msgid "Match not found by MatchMaker."
msgstr ""
@@ -5002,94 +5390,100 @@ msgstr ""
msgid "Could not find another compute"
msgstr ""
-#: nova/scheduler/driver.py:61
+#: nova/scheduler/driver.py:58
msgid "Exception during scheduler.run_instance"
msgstr ""
-#: nova/scheduler/driver.py:65 nova/scheduler/manager.py:186
+#: nova/scheduler/driver.py:60 nova/scheduler/manager.py:192
#, python-format
msgid "Setting instance to %(state)s state."
msgstr ""
-#: nova/scheduler/driver.py:142
+#: nova/scheduler/driver.py:139
msgid "Driver must implement schedule_prep_resize"
msgstr ""
-#: nova/scheduler/driver.py:150
+#: nova/scheduler/driver.py:147
msgid "Driver must implement schedule_run_instance"
msgstr ""
-#: nova/scheduler/driver.py:282
+#: nova/scheduler/driver.py:271
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
"memory(host:%(avail)s <= instance:%(mem_inst)s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:52
+#: nova/scheduler/filter_scheduler.py:56
#, python-format
msgid "Attempting to build %(num_instances)d instance(s)"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:190
+#: nova/scheduler/filter_scheduler.py:192
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:207
+#: nova/scheduler/filter_scheduler.py:209
#, python-format
msgid "Error from last host: %(last_host)s (node %(last_node)s): %(exc)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:236
+#: nova/scheduler/filter_scheduler.py:238
#, python-format
msgid ""
"Exceeded max scheduling attempts %(max_attempts)d for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:290
+#: nova/scheduler/filter_scheduler.py:292
#, python-format
msgid "Filtered %(hosts)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:295
+#: nova/scheduler/filter_scheduler.py:297
#, python-format
msgid "Choosing host %(best_host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:305
+#: nova/scheduler/filter_scheduler.py:327
+#, python-format
+msgid "Unable to migrate %(instance_uuid)s to %(dest)s: Lack of memory"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:306
#, python-format
msgid "Host filter ignoring hosts: %(ignored_hosts_str)s"
msgstr ""
-#: nova/scheduler/host_manager.py:314
+#: nova/scheduler/host_manager.py:315
#, python-format
msgid ""
"No hosts matched due to not matching 'force_hosts'value of "
"'%(forced_hosts_str)s'"
msgstr ""
-#: nova/scheduler/host_manager.py:319
+#: nova/scheduler/host_manager.py:320
#, python-format
msgid "Host filter forcing available hosts to %(forced_hosts_str)s"
msgstr ""
-#: nova/scheduler/host_manager.py:348
+#: nova/scheduler/host_manager.py:352
#, python-format
msgid "Ignoring %(service_name)s service update from %(host)s"
msgstr ""
-#: nova/scheduler/host_manager.py:353
+#: nova/scheduler/host_manager.py:357
#, python-format
msgid "Received %(service_name)s service update from %(state_key)s."
msgstr ""
-#: nova/scheduler/host_manager.py:371
+#: nova/scheduler/host_manager.py:375
+#: nova/scheduler/filters/trusted_filter.py:220
#, python-format
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/manager.py:170
+#: nova/scheduler/manager.py:178
#, python-format
msgid "Failed to schedule_%(method)s: %(ex)s"
msgstr ""
@@ -5190,79 +5584,70 @@ msgstr ""
msgid "Host %(host)s %(pass_msg)s. Previously tried hosts: %(hosts)s"
msgstr ""
-#: nova/scheduler/filters/trusted_filter.py:200
-#, python-format
-msgid "TCP: trust state of %(host)s:%(level)s(%(trust)s)"
-msgstr ""
-
#: nova/scheduler/weights/__init__.py:58
msgid "least_cost has been deprecated in favor of the RAM Weigher."
msgstr ""
-#: nova/servicegroup/api.py:48
+#: nova/servicegroup/api.py:59
#, python-format
msgid "ServiceGroup driver defined as an instance of %s"
msgstr ""
-#: nova/servicegroup/api.py:54
+#: nova/servicegroup/api.py:65
#, python-format
msgid "unknown ServiceGroup driver name: %s"
msgstr ""
-#: nova/servicegroup/api.py:70
+#: nova/servicegroup/api.py:82
#, python-format
msgid ""
"Join new ServiceGroup member %(member_id)s to the %(group_id)s group, "
"service = %(service)s"
msgstr ""
-#: nova/servicegroup/api.py:77
+#: nova/servicegroup/api.py:89
#, python-format
msgid "Check if the given member [%s] is part of the ServiceGroup, is up"
msgstr ""
-#: nova/servicegroup/api.py:86
+#: nova/servicegroup/api.py:98
#, python-format
msgid ""
"Explicitly remove the given member %(member_id)s from the%(group_id)s "
"group monitoring"
msgstr ""
-#: nova/servicegroup/api.py:93
+#: nova/servicegroup/api.py:105
#, python-format
msgid "Returns ALL members of the [%s] ServiceGroup"
msgstr ""
-#: nova/servicegroup/api.py:101
+#: nova/servicegroup/api.py:113
#, python-format
msgid "Returns one member of the [%s] group"
msgstr ""
-#: nova/servicegroup/db_driver.py:36
+#: nova/servicegroup/drivers/db.py:41
#, python-format
msgid ""
"DB_Driver: join new ServiceGroup member %(member_id)s to the %(group_id)s"
" group, service = %(service)s"
msgstr ""
-#: nova/servicegroup/db_driver.py:40
+#: nova/servicegroup/drivers/db.py:45
msgid "service is a mandatory argument for DB based ServiceGroup driver"
msgstr ""
-#: nova/servicegroup/db_driver.py:64
+#: nova/servicegroup/drivers/db.py:74
#, python-format
msgid "DB_Driver: get_all members of the %s group"
msgstr ""
-#: nova/servicegroup/db_driver.py:81
-msgid "The service database object disappeared, Recreating it."
-msgstr ""
-
-#: nova/servicegroup/db_driver.py:96
+#: nova/servicegroup/drivers/db.py:97
msgid "Recovered model server connection!"
msgstr ""
-#: nova/servicegroup/db_driver.py:102
+#: nova/servicegroup/drivers/db.py:103
msgid "model server went away"
msgstr ""
@@ -5290,34 +5675,38 @@ msgstr ""
msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
msgstr ""
-#: nova/tests/fake_volume.py:182 nova/volume/cinder.py:179
+#: nova/tests/fake_volume.py:180 nova/volume/cinder.py:201
msgid "status must be available"
msgstr ""
-#: nova/tests/fake_volume.py:186 nova/volume/cinder.py:182
+#: nova/tests/fake_volume.py:184 nova/volume/cinder.py:204
msgid "already attached"
msgstr ""
-#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:188
+#: nova/tests/fake_volume.py:189 nova/volume/cinder.py:210
msgid "already detached"
msgstr ""
-#: nova/tests/fakelibvirt.py:852
+#: nova/tests/fakelibvirt.py:861
msgid "Please extend mock libvirt module to support flags"
msgstr ""
-#: nova/tests/fakelibvirt.py:856
+#: nova/tests/fakelibvirt.py:865
msgid "Expected a list for 'auth' parameter"
msgstr ""
-#: nova/tests/fakelibvirt.py:860
+#: nova/tests/fakelibvirt.py:869
msgid "Expected a function in 'auth[0]' parameter"
msgstr ""
-#: nova/tests/fakelibvirt.py:864
+#: nova/tests/fakelibvirt.py:873
msgid "Expected a function in 'auth[1]' parameter"
msgstr ""
+#: nova/tests/test_hypervapi.py:406
+msgid "fake vswitch not found"
+msgstr ""
+
#: nova/tests/test_misc.py:59
#, python-format
msgid ""
@@ -5361,12 +5750,12 @@ msgstr ""
msgid "uuid"
msgstr ""
-#: nova/tests/test_xenapi.py:770
+#: nova/tests/test_xenapi.py:802
#, python-format
msgid "Creating files in %s to simulate guest agent"
msgstr ""
-#: nova/tests/test_xenapi.py:781
+#: nova/tests/test_xenapi.py:813
#, python-format
msgid "Removing simulated guest agent files in %s"
msgstr ""
@@ -5383,17 +5772,17 @@ msgstr ""
msgid "unexpected role header"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3097
+#: nova/tests/api/openstack/compute/test_servers.py:3215
msgid ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3102
+#: nova/tests/api/openstack/compute/test_servers.py:3220
msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3107
+#: nova/tests/api/openstack/compute/test_servers.py:3225
msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
msgstr ""
@@ -5448,79 +5837,42 @@ msgstr ""
msgid "test_snapshot_detail: resp_dict=%s"
msgstr ""
-#: nova/tests/compute/test_compute.py:650
-#: nova/tests/compute/test_compute.py:668
-#: nova/tests/compute/test_compute.py:719
-#: nova/tests/compute/test_compute.py:746
-#: nova/tests/compute/test_compute.py:2604
+#: nova/tests/compute/test_compute.py:716
+#: nova/tests/compute/test_compute.py:734
+#: nova/tests/compute/test_compute.py:785
+#: nova/tests/compute/test_compute.py:812
+#: nova/tests/compute/test_compute.py:2766
#, python-format
msgid "Running instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:656
-#: nova/tests/compute/test_compute.py:691
-#: nova/tests/compute/test_compute.py:734
-#: nova/tests/compute/test_compute.py:764
+#: nova/tests/compute/test_compute.py:722
+#: nova/tests/compute/test_compute.py:757
+#: nova/tests/compute/test_compute.py:800
+#: nova/tests/compute/test_compute.py:830
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1182
+#: nova/tests/compute/test_compute.py:1263
msgid "Internal error"
msgstr ""
-#: nova/tests/compute/test_compute.py:2615
+#: nova/tests/compute/test_compute.py:2777
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:3085
+#: nova/tests/compute/test_compute.py:3275
msgid "wrong host/node"
msgstr ""
-#: nova/tests/hyperv/hypervutils.py:150 nova/virt/hyperv/vmops.py:552
-#, python-format
-msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
-msgstr ""
-
-#: nova/tests/hyperv/hypervutils.py:208 nova/virt/hyperv/vmops.py:487
-#, python-format
-msgid "Failed to destroy vm %s"
-msgstr ""
-
-#: nova/tests/hyperv/hypervutils.py:252 nova/virt/hyperv/snapshotops.py:92
-#, python-format
-msgid "Failed to get info for disk %s"
-msgstr ""
-
-#: nova/tests/hyperv/mockproxy.py:73
-#, python-format
-msgid "Couldn't find invocation num. %(c)d of attribute \"%(name)s\""
-msgstr ""
-
-#: nova/tests/hyperv/mockproxy.py:92 nova/tests/hyperv/mockproxy.py:120
-#, python-format
-msgid "Couldn't find attribute \"%s\""
-msgstr ""
-
-#: nova/tests/hyperv/mockproxy.py:97
-#, python-format
-msgid "Couldn't find attribute \"%(name)s\" with arguments \"%(params)s\""
-msgstr ""
-
-#: nova/tests/hyperv/mockproxy.py:100
-#, python-format
-msgid ""
-"Couldn't find invocation num. %(c)d of attribute \"%(name)s\" with "
-"arguments \"%(params)s\""
-msgstr ""
-
-#: nova/tests/integrated/test_api_samples.py:157
+#: nova/tests/integrated/test_api_samples.py:166
#, python-format
msgid "Result: %(result)s is not a dict."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:161
+#: nova/tests/integrated/test_api_samples.py:170
#, python-format
msgid ""
"Key mismatch:\n"
@@ -5528,25 +5880,21 @@ msgid ""
"%(res_keys)s"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:169
+#: nova/tests/integrated/test_api_samples.py:178
#, python-format
msgid "Result: %(result)s is not a list."
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:172
-#, python-format
-msgid ""
-"Length mismatch: %(result)s\n"
-"%(expected)s."
+#: nova/tests/integrated/test_api_samples.py:196
+msgid "Extra items in expected:"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:183
-#, python-format
-msgid "Result: %(res_obj)s not in %(expected)s."
+#: nova/tests/integrated/test_api_samples.py:200
+msgid "Extra items in result:"
msgstr ""
-#: nova/tests/integrated/test_api_samples.py:201
-#: nova/tests/integrated/test_api_samples.py:214
+#: nova/tests/integrated/test_api_samples.py:219
+#: nova/tests/integrated/test_api_samples.py:232
#, python-format
msgid ""
"Values do not match:\n"
@@ -5608,31 +5956,31 @@ msgstr ""
msgid "Decoding JSON: %s"
msgstr ""
-#: nova/virt/configdrive.py:92
+#: nova/virt/configdrive.py:96
#, python-format
msgid "Added %(filepath)s to config drive"
msgstr ""
-#: nova/virt/driver.py:798
+#: nova/virt/driver.py:872
msgid "Compute driver option required, but not specified"
msgstr ""
-#: nova/virt/driver.py:801
+#: nova/virt/driver.py:875
#, python-format
msgid "Loading compute driver '%s'"
msgstr ""
-#: nova/virt/driver.py:808
+#: nova/virt/driver.py:882
#, python-format
msgid "Unable to load the virtualization driver: %s"
msgstr ""
-#: nova/virt/fake.py:195
+#: nova/virt/fake.py:205
#, python-format
msgid "Key '%(key)s' not in instances '%(inst)s'"
msgstr ""
-#: nova/virt/firewall.py:179 nova/virt/libvirt/firewall.py:251
+#: nova/virt/firewall.py:179 nova/virt/libvirt/firewall.py:267
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
@@ -5649,7 +5997,7 @@ msgstr ""
msgid "Adding security group rule: %r"
msgstr ""
-#: nova/virt/firewall.py:491 nova/virt/xenapi/firewall.py:75
+#: nova/virt/firewall.py:491 nova/virt/xenapi/firewall.py:74
#, python-format
msgid "Adding provider rule: %s"
msgstr ""
@@ -5658,167 +6006,217 @@ msgstr ""
msgid "Snapshot list encountered but no header found!"
msgstr ""
-#: nova/virt/images.py:212
+#: nova/virt/images.py:215
msgid "'qemu-img info' parsing failed."
msgstr ""
-#: nova/virt/images.py:218
+#: nova/virt/images.py:221
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr ""
-#: nova/virt/images.py:229
+#: nova/virt/images.py:232
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:90
+#: nova/virt/baremetal/driver.py:94
#, python-format
msgid "Request for baremetal node %s sent to wrong service host"
msgstr ""
-#: nova/virt/baremetal/driver.py:142
+#: nova/virt/baremetal/driver.py:146
msgid "cpu_arch is not found in instance_type_extra_specs"
msgstr ""
-#: nova/virt/baremetal/driver.py:182
-msgid "Baremetal node id not supplied to driver"
+#: nova/virt/baremetal/driver.py:185
+#, python-format
+msgid "Node %(id)r assigned to instance %(uuid)r which cannot be found."
msgstr ""
-#: nova/virt/baremetal/driver.py:241
+#: nova/virt/baremetal/driver.py:200
+#, python-format
+msgid "Baremetal node id not supplied to driver for %r"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:271
#, python-format
msgid "Failed to update state record for baremetal node %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:260
+#: nova/virt/baremetal/driver.py:290
#, python-format
msgid "Delete called on non-existing instance %s"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:83
+#: nova/virt/baremetal/ipmi.py:84
#, python-format
msgid "pid file %s does not contain any pid"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:106
+#: nova/virt/baremetal/ipmi.py:107
msgid "Node id not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:108
+#: nova/virt/baremetal/ipmi.py:110
msgid "Address not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:110
+#: nova/virt/baremetal/ipmi.py:113
msgid "User not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:112
+#: nova/virt/baremetal/ipmi.py:116
msgid "Password not supplied to IPMI"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:128
+#: nova/virt/baremetal/ipmi.py:133
#, python-format
-msgid "ipmitool stdout: '%(out)s', stderr: '%(err)%s'"
+msgid "ipmitool stdout: '%(out)s', stderr: '%(err)s'"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:154
+#: nova/virt/baremetal/ipmi.py:159
msgid "IPMI power on failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:176
+#: nova/virt/baremetal/ipmi.py:181
msgid "IPMI power off failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:186
+#: nova/virt/baremetal/ipmi.py:191
msgid "IPMI set next bootdev failed"
msgstr ""
-#: nova/virt/baremetal/ipmi.py:191
+#: nova/virt/baremetal/ipmi.py:196
#, python-format
msgid "Activate node called, but node %s is already active"
msgstr ""
-#: nova/virt/baremetal/utils.py:39
+#: nova/virt/baremetal/pxe.py:85
+#, python-format
+msgid "Building PXE config for deployment %s."
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:224
+#, python-format
+msgid ""
+"Can not activate PXE bootloader. The following boot parameters were not "
+"passed to baremetal driver: %s"
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:249
+#, python-format
+msgid "Fetching kernel and ramdisk for instance %s"
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:281
+#, python-format
+msgid "Fetching image %(ami)s for instance %(name)s"
+msgstr ""
+
+#: nova/virt/baremetal/pxe.py:318
+#, python-format
+msgid "Injecting files into image for instance %(name)s"
+msgstr ""
+
+#: nova/virt/baremetal/utils.py:41
#, python-format
msgid "Failed to inject data into image %(image)s. Error: %(e)s"
msgstr ""
-#: nova/virt/baremetal/utils.py:47
+#: nova/virt/baremetal/utils.py:52
#, python-format
-msgid "Failed to unlink %s"
+msgid "Failed to unlink %(path)s, error: %(e)s"
msgstr ""
-#: nova/virt/baremetal/utils.py:59
+#: nova/virt/baremetal/utils.py:60
#, python-format
-msgid "Failed to create symlink from %(source)s to %(link)s"
+msgid "Failed to remove dir %(path)s, error: %(e)s"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:37
+#: nova/virt/baremetal/utils.py:75
+#, python-format
+msgid "Failed to create symlink from %(source)s to %(link)s, error: %(e)s"
+msgstr ""
+
+#: nova/virt/baremetal/vif_driver.py:36
#, python-format
msgid "plug: instance_uuid=%(uuid)s vif=%(vif)s"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:50
+#: nova/virt/baremetal/vif_driver.py:49
#, python-format
msgid "pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:57
+#: nova/virt/baremetal/vif_driver.py:56
#, python-format
msgid ""
"Baremetal node: %(id)s has no available physical interface for virtual "
"interface %(vif_uuid)s"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:63
+#: nova/virt/baremetal/vif_driver.py:62
#, python-format
msgid "unplug: instance_uuid=%(uuid)s vif=%(vif)s"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:71
+#: nova/virt/baremetal/vif_driver.py:70
#, python-format
msgid "pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)"
msgstr ""
-#: nova/virt/baremetal/vif_driver.py:75
+#: nova/virt/baremetal/vif_driver.py:74
#, python-format
msgid "no pif for vif_uuid=%s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:131
+#: nova/virt/baremetal/virtual_power_driver.py:97
+msgid "virtual_power_ssh_host not defined. Can not Start"
+msgstr ""
+
+#: nova/virt/baremetal/virtual_power_driver.py:101
+msgid "virtual_power_host_user not defined. Can not Start"
+msgstr ""
+
+#: nova/virt/baremetal/virtual_power_driver.py:105
+msgid "virtual_power_host_pass not defined. Can not Start"
+msgstr ""
+
+#: nova/virt/baremetal/volume_driver.py:120
#, python-format
msgid "baremetal driver was unable to delete tid %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:195
+#: nova/virt/baremetal/volume_driver.py:184
#, python-format
msgid "Could not determine iscsi initiator name for instance %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:237
+#: nova/virt/baremetal/volume_driver.py:225
#, python-format
msgid "No fixed PXE IP is associated to %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:270
+#: nova/virt/baremetal/volume_driver.py:257
#, python-format
msgid "detach volume could not find tid for %s"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:164
+#: nova/virt/baremetal/db/sqlalchemy/api.py:163
msgid "instance_uuid must be supplied to bm_node_set_uuid_safe"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:176
+#: nova/virt/baremetal/db/sqlalchemy/api.py:175
#, python-format
msgid "Failed to associate instance %(uuid)s to baremetal node %(id)s."
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:284
+#: nova/virt/baremetal/db/sqlalchemy/api.py:283
msgid "No more PXE IPs available"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:306
+#: nova/virt/baremetal/db/sqlalchemy/api.py:305
#: nova/virt/baremetal/db/sqlalchemy/api.py:347
#, python-format
msgid "Baremetal interface %s not found"
@@ -5834,33 +6232,33 @@ msgstr ""
msgid "Baremetal virtual interface %s not found"
msgstr ""
-#: nova/virt/disk/api.py:127
+#: nova/virt/disk/api.py:131
#, python-format
msgid "Checking if we can resize image %(image)s. size=%(size)s, CoW=%(use_cow)s"
msgstr ""
-#: nova/virt/disk/api.py:133
+#: nova/virt/disk/api.py:137
#, python-format
msgid "Cannot resize filesystem %s to a smaller size."
msgstr ""
-#: nova/virt/disk/api.py:144
+#: nova/virt/disk/api.py:148
#, python-format
msgid "Unable to mount image %(image)s with error %(error)s. Cannot resize."
msgstr ""
-#: nova/virt/disk/api.py:154
+#: nova/virt/disk/api.py:158
#, python-format
msgid ""
"Unable to determine label for image %(image)s with error %(errror)s. "
"Cannot resize."
msgstr ""
-#: nova/virt/disk/api.py:234
+#: nova/virt/disk/api.py:238
msgid "image already mounted"
msgstr ""
-#: nova/virt/disk/api.py:279
+#: nova/virt/disk/api.py:294
#, python-format
msgid ""
"Inject data image=%(image)s key=%(key)s net=%(net)s metadata=%(metadata)s"
@@ -5868,53 +6266,68 @@ msgid ""
"partition=%(partition)s use_cow=%(use_cow)s"
msgstr ""
-#: nova/virt/disk/api.py:303
+#: nova/virt/disk/api.py:311
+#, python-format
+msgid "Ignoring error injecting data into image (%(e)s)"
+msgstr ""
+
+#: nova/virt/disk/api.py:330
#, python-format
msgid ""
"Failed to mount container filesystem '%(image)s' on '%(target)s': "
"%(errors)s"
msgstr ""
-#: nova/virt/disk/api.py:320
+#: nova/virt/disk/api.py:347
#, python-format
-msgid "Failed to unmount container filesystem: %s"
+msgid "Failed to teardown ntainer filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:342
+#: nova/virt/disk/api.py:360
+#, python-format
+msgid "Failed to umount container filesystem: %s"
+msgstr ""
+
+#: nova/virt/disk/api.py:385
+#, python-format
+msgid "Ignoring error injecting %(inject)s into image (%(e)s)"
+msgstr ""
+
+#: nova/virt/disk/api.py:397
#, python-format
msgid "Inject file fs=%(fs)s path=%(path)s append=%(append)s"
msgstr ""
-#: nova/virt/disk/api.py:351
+#: nova/virt/disk/api.py:406
#, python-format
msgid "Inject metadata fs=%(fs)s metadata=%(metadata)s"
msgstr ""
-#: nova/virt/disk/api.py:392
+#: nova/virt/disk/api.py:447
#, python-format
msgid "Inject key fs=%(fs)s key=%(key)s"
msgstr ""
-#: nova/virt/disk/api.py:420
+#: nova/virt/disk/api.py:476
#, python-format
msgid "Inject key fs=%(fs)s net=%(net)s"
msgstr ""
-#: nova/virt/disk/api.py:446
+#: nova/virt/disk/api.py:502
#, python-format
msgid "Inject admin password fs=%(fs)s admin_passwd=ha-ha-not-telling-you"
msgstr ""
-#: nova/virt/disk/api.py:491
+#: nova/virt/disk/api.py:547
msgid "Not implemented on Windows"
msgstr ""
-#: nova/virt/disk/api.py:520
+#: nova/virt/disk/api.py:576
#, python-format
msgid "User %(username)s not found in password file."
msgstr ""
-#: nova/virt/disk/api.py:536
+#: nova/virt/disk/api.py:592
#, python-format
msgid "User %(username)s not found in shadow file."
msgstr ""
@@ -6121,66 +6534,66 @@ msgstr ""
msgid "Setting up appliance for %(imgfile)s %(imgfmt)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:106
+#: nova/virt/disk/vfs/guestfs.py:107
#, python-format
msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:113
+#: nova/virt/disk/vfs/guestfs.py:114
msgid "Tearing down appliance"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:117
+#: nova/virt/disk/vfs/guestfs.py:120
#, python-format
msgid "Failed to close augeas %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:121
+#: nova/virt/disk/vfs/guestfs.py:128
#, python-format
msgid "Failed to shutdown appliance %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:125
+#: nova/virt/disk/vfs/guestfs.py:136
#, python-format
msgid "Failed to close guest handle %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:135 nova/virt/disk/vfs/localfs.py:102
+#: nova/virt/disk/vfs/guestfs.py:148 nova/virt/disk/vfs/localfs.py:102
#, python-format
msgid "Make directory path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:140 nova/virt/disk/vfs/localfs.py:107
+#: nova/virt/disk/vfs/guestfs.py:153 nova/virt/disk/vfs/localfs.py:107
#, python-format
msgid "Append file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:145 nova/virt/disk/vfs/localfs.py:116
+#: nova/virt/disk/vfs/guestfs.py:158 nova/virt/disk/vfs/localfs.py:116
#, python-format
msgid "Replace file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:150 nova/virt/disk/vfs/localfs.py:125
+#: nova/virt/disk/vfs/guestfs.py:163 nova/virt/disk/vfs/localfs.py:125
#, python-format
msgid "Read file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:155 nova/virt/disk/vfs/localfs.py:131
+#: nova/virt/disk/vfs/guestfs.py:168 nova/virt/disk/vfs/localfs.py:131
#, python-format
msgid "Has file path=%(path)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:164
+#: nova/virt/disk/vfs/guestfs.py:177
#, python-format
msgid "Set permissions path=%(path)s mode=%(mode)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:169
+#: nova/virt/disk/vfs/guestfs.py:182
#, python-format
msgid "Set ownership path=%(path)s user=%(user)s group=%(group)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:182
+#: nova/virt/disk/vfs/guestfs.py:195
#, python-format
msgid "chown uid=%(uid)d gid=%(gid)s"
msgstr ""
@@ -6215,843 +6628,730 @@ msgstr ""
msgid "Set permissions path=%(path)s user=%(user)s group=%(group)s"
msgstr ""
-#: nova/virt/hyperv/basevolumeutils.py:55
+#: nova/virt/hyperv/basevolumeutils.py:73
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
-#: nova/virt/hyperv/basevolumeutils.py:79 nova/virt/libvirt/driver.py:1421
-#: nova/virt/xenapi/vm_utils.py:504
-#, python-format
-msgid "block_device_list %s"
-msgstr ""
-
-#: nova/virt/hyperv/driver.py:183 nova/virt/hyperv/driver.py:186
+#: nova/virt/hyperv/driver.py:146 nova/virt/hyperv/driver.py:149
msgid "plug_vifs called"
msgstr ""
-#: nova/virt/hyperv/driver.py:189
+#: nova/virt/hyperv/driver.py:152
msgid "ensure_filtering_rules_for_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:194
+#: nova/virt/hyperv/driver.py:156
msgid "unfilter_instance called"
msgstr ""
-#: nova/virt/hyperv/driver.py:198
+#: nova/virt/hyperv/driver.py:159
msgid "confirm_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:203
+#: nova/virt/hyperv/driver.py:163
msgid "finish_revert_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:209
+#: nova/virt/hyperv/driver.py:168
msgid "finish_migration called"
msgstr ""
-#: nova/virt/hyperv/driver.py:212
+#: nova/virt/hyperv/driver.py:171
msgid "get_console_output called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:78
-msgid ""
-"Cannot get the number of cpu, because this function is not implemented "
-"for this platform. This error can be safely ignored for now."
-msgstr ""
-
-#: nova/virt/hyperv/hostops.py:134 nova/virt/hyperv/volumeops.py:85
+#: nova/virt/hyperv/hostops.py:89
#, python-format
msgid "Windows version: %s "
msgstr ""
-#: nova/virt/hyperv/hostops.py:146
+#: nova/virt/hyperv/hostops.py:101
msgid "get_available_resource called"
msgstr ""
-#: nova/virt/hyperv/hostops.py:163 nova/virt/libvirt/driver.py:3105
+#: nova/virt/hyperv/hostops.py:131 nova/virt/libvirt/driver.py:3327
#: nova/virt/xenapi/host.py:149
msgid "Updating host stats"
msgstr ""
-#: nova/virt/hyperv/hostops.py:183
+#: nova/virt/hyperv/hostops.py:155
msgid "get_host_stats called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:53
-msgid ""
-"Live migration is not supported \" \"by this version "
-"of Hyper-V"
-msgstr ""
-
-#: nova/virt/hyperv/livemigrationops.py:62
-msgid "Live migration is not enabled on this host"
-msgstr ""
-
-#: nova/virt/hyperv/livemigrationops.py:65
-msgid "Live migration networks are not configured on this host"
-msgstr ""
-
-#: nova/virt/hyperv/livemigrationops.py:69
+#: nova/virt/hyperv/livemigrationops.py:47
msgid "live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:95
-#, python-format
-msgid "Getting live migration networks for remote host: %s"
-msgstr ""
-
-#: nova/virt/hyperv/livemigrationops.py:114
-#, python-format
-msgid "Starting live migration for instance: %s"
-msgstr ""
-
-#: nova/virt/hyperv/livemigrationops.py:127
-#, python-format
-msgid "Failed to live migrate VM %s"
-msgstr ""
-
-#: nova/virt/hyperv/livemigrationops.py:130
+#: nova/virt/hyperv/livemigrationops.py:54
#, python-format
msgid "Calling live migration recover_method for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:134
+#: nova/virt/hyperv/livemigrationops.py:58
#, python-format
msgid "Calling live migration post_method for instance: %s"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:140
+#: nova/virt/hyperv/livemigrationops.py:64
msgid "pre_live_migration called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:158
+#: nova/virt/hyperv/livemigrationops.py:80
msgid "post_live_migration_at_destination called"
msgstr ""
-#: nova/virt/hyperv/livemigrationops.py:162
+#: nova/virt/hyperv/livemigrationops.py:84
#, python-format
msgid "compare_cpu called %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:57
+#: nova/virt/hyperv/livemigrationutils.py:40
#, python-format
-msgid "Creating snapshot for instance %s"
+msgid "Live migration is not supported on target host \"%s\""
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:71
+#: nova/virt/hyperv/livemigrationutils.py:43
#, python-format
-msgid "Failed to create snapshot for VM %s"
+msgid "Target live migration host \"%s\" is unreachable"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:83
+#: nova/virt/hyperv/livemigrationutils.py:46
#, python-format
-msgid "Getting info for VHD %s"
-msgstr ""
-
-#: nova/virt/hyperv/snapshotops.py:106
-#, python-format
-msgid "Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s"
-msgstr ""
-
-#: nova/virt/hyperv/snapshotops.py:116
-#, python-format
-msgid "Copying base disk %(src_vhd_path)s to %(dest_base_disk_path)s"
+msgid "Live migration failed: %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:120
-#, python-format
-msgid ""
-"Reconnecting copied base VHD %(dest_base_disk_path)s and diff VHD "
-"%(dest_vhd_path)s"
-msgstr ""
-
-#: nova/virt/hyperv/snapshotops.py:134
-#, python-format
-msgid ""
-"Failed to reconnect base disk %(dest_base_disk_path)s and diff disk "
-"%(dest_vhd_path)s"
+#: nova/virt/hyperv/livemigrationutils.py:58
+msgid "Live migration is not enabled on this host"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:139
-#, python-format
-msgid "Merging base disk %(dest_base_disk_path)s and diff disk %(dest_vhd_path)s"
+#: nova/virt/hyperv/livemigrationutils.py:61
+msgid "Live migration networks are not configured on this host"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:151
+#: nova/virt/hyperv/livemigrationutils.py:67 nova/virt/hyperv/vmutils.py:94
#, python-format
-msgid ""
-"Failed to merge base disk %(dest_base_disk_path)s and diff disk "
-"%(dest_vhd_path)s"
+msgid "VM not found: %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:164
+#: nova/virt/hyperv/livemigrationutils.py:69 nova/virt/hyperv/vmutils.py:103
#, python-format
-msgid ""
-"Updating Glance image %(image_id)s with content from merged disk "
-"%(image_vhd_path)s"
+msgid "Duplicate VM name found: %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:169
+#: nova/virt/hyperv/livemigrationutils.py:94
#, python-format
-msgid "Snapshot image %(image_id)s updated for VM %(instance_name)s"
+msgid "Getting live migration networks for remote host: %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:172
+#: nova/virt/hyperv/livemigrationutils.py:109
#, python-format
-msgid "Removing snapshot %s"
+msgid "Starting live migration for VM: %s"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:181
+#: nova/virt/hyperv/networkutils.py:46
#, python-format
-msgid "Failed to remove snapshot for VM %s"
+msgid "vswitch \"%s\" not found"
msgstr ""
-#: nova/virt/hyperv/snapshotops.py:186
+#: nova/virt/hyperv/networkutils.py:59
#, python-format
-msgid "Removing folder %s "
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:80
-msgid "get_info called for instance"
+msgid "Failed to create vswitch port %(port_name)s on switch %(vswitch_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:103
+#: nova/virt/hyperv/pathutils.py:42 nova/virt/hyperv/pathutils.py:62
#, python-format
-msgid "hyperv vm state: %s"
+msgid "Creating folder %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:109
+#: nova/virt/hyperv/pathutils.py:60
#, python-format
-msgid ""
-"Got Info for vm %(instance_name)s: state=%(state)d, mem=%(memusage)s, "
-"num_cpu=%(numprocs)s, uptime=%(uptime)s"
+msgid "Removing existing folder %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:144
+#: nova/virt/hyperv/snapshotops.py:56
#, python-format
-msgid "cache image failed: %s"
+msgid "Creating snapshot for instance %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:168
+#: nova/virt/hyperv/snapshotops.py:65
#, python-format
-msgid "Starting VM %s "
+msgid "Getting info for VHD %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:170
+#: nova/virt/hyperv/snapshotops.py:73
#, python-format
-msgid "Started VM %s "
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:172
-#, python-format
-msgid "spawn vm failed: %s"
+msgid "Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:178
+#: nova/virt/hyperv/snapshotops.py:83
#, python-format
-msgid "Invalid config_drive_format \"%s\""
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:181 nova/virt/libvirt/driver.py:1362
-msgid "Using config drive"
+msgid "Copying base disk %(src_vhd_path)s to %(dest_base_disk_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:192 nova/virt/libvirt/driver.py:1371
+#: nova/virt/hyperv/snapshotops.py:87
#, python-format
-msgid "Creating config drive at %(path)s"
+msgid ""
+"Reconnecting copied base VHD %(dest_base_disk_path)s and diff VHD "
+"%(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:199 nova/virt/libvirt/driver.py:1377
+#: nova/virt/hyperv/snapshotops.py:93
#, python-format
-msgid "Creating config drive failed with error: %s"
+msgid "Merging base disk %(dest_base_disk_path)s and diff disk %(dest_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:238
+#: nova/virt/hyperv/snapshotops.py:98
#, python-format
-msgid "Failed to create VM %s"
+msgid ""
+"Updating Glance image %(image_id)s with content from merged disk "
+"%(image_vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:241
+#: nova/virt/hyperv/snapshotops.py:104
#, python-format
-msgid "Created VM %s..."
+msgid "Snapshot image %(image_id)s updated for VM %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:258
+#: nova/virt/hyperv/snapshotops.py:108
#, python-format
-msgid "Set memory for vm %s..."
+msgid "Removing snapshot %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:271
+#: nova/virt/hyperv/snapshotops.py:112
#, python-format
-msgid "Set vcpus for vm %s..."
+msgid "Failed to remove snapshot for VM %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:275
+#: nova/virt/hyperv/snapshotops.py:115
#, python-format
-msgid "Creating a scsi controller for %(vm_name)s for volume attaching"
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:284
-msgid "Controller not found"
+msgid "Removing folder %s "
msgstr ""
-#: nova/virt/hyperv/vmops.py:292
+#: nova/virt/hyperv/vif.py:75
#, python-format
-msgid "Failed to add scsi controller to VM %s"
+msgid "Creating vswitch port for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:309
+#: nova/virt/hyperv/vmops.py:85
#, python-format
-msgid "Creating disk for %(vm_name)s by attaching disk file %(path)s"
+msgid "VIF driver not found for network_api_class: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:337
-#, python-format
-msgid "Failed to add drive to VM %s"
+#: nova/virt/hyperv/vmops.py:94
+msgid "get_info called for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:340
+#: nova/virt/hyperv/vmops.py:135
#, python-format
-msgid "New %(drive_type)s drive path is %(drive_path)s"
+msgid "cache image failed: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:365
+#: nova/virt/hyperv/vmops.py:157
#, python-format
-msgid "Failed to add %(drive_type)s image to VM %(vm_name)s"
+msgid "Creating nic for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:367
-#, python-format
-msgid "Created drive type %(drive_type)s for %(vm_name)s"
+#: nova/virt/hyperv/vmops.py:172
+msgid "Spawn instance failed"
msgstr ""
-#: nova/virt/hyperv/vmops.py:372
+#: nova/virt/hyperv/vmops.py:176
#, python-format
-msgid "Creating nic for %s "
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:377
-msgid "Cannot find vSwitch"
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:397
-msgid "Failed creating a port on the external vswitch"
+msgid "Invalid config_drive_format \"%s\""
msgstr ""
-#: nova/virt/hyperv/vmops.py:398
+#: nova/virt/hyperv/vmops.py:179
#, python-format
-msgid "Failed creating port for %s"
+msgid "Using config drive for instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:401
+#: nova/virt/hyperv/vmops.py:192 nova/virt/libvirt/driver.py:1529
#, python-format
-msgid "Created switch port %(vm_name)s on switch %(ext_path)s"
+msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:413
+#: nova/virt/hyperv/vmops.py:199 nova/virt/libvirt/driver.py:1535
#, python-format
-msgid "Failed to add nic to VM %s"
+msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:415
+#: nova/virt/hyperv/vmops.py:227
#, python-format
-msgid "Created nic for %s "
+msgid "Got request to destroy instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:422 nova/virt/hyperv/vmops.py:425
+#: nova/virt/hyperv/vmops.py:236
#, python-format
-msgid "Attempting to bind NIC to %s "
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:430
-msgid "No vSwitch specified, attaching to default"
+msgid "Instance not found: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:453
+#: nova/virt/hyperv/vmops.py:239
#, python-format
-msgid "Got request to destroy vm %s"
+msgid "Failed to destroy instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:497
-#, python-format
-msgid "Del: disk %(vhdfile)s vm %(name)s"
+#: nova/virt/hyperv/vmops.py:244
+msgid "reboot instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:503
+#: nova/virt/hyperv/vmops.py:250
msgid "Pause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:508
+#: nova/virt/hyperv/vmops.py:256
msgid "Unpause instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:514
+#: nova/virt/hyperv/vmops.py:263
msgid "Suspend instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:519
+#: nova/virt/hyperv/vmops.py:269
msgid "Resume instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:524
+#: nova/virt/hyperv/vmops.py:275
msgid "Power off instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:529
+#: nova/virt/hyperv/vmops.py:281
msgid "Power on instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:549
-#, python-format
-msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:578
-#, python-format
-msgid "use_cow_image:%s"
-msgstr ""
-
-#: nova/virt/hyperv/vmops.py:598
-#, python-format
-msgid "Failed to create Difference Disk from %(base)s to %(target)s"
-msgstr ""
-
-#: nova/virt/hyperv/vmutils.py:54
+#: nova/virt/hyperv/vmops.py:288
#, python-format
-msgid "duplicate name found: %s"
+msgid "Successfully changed state of VM %(vm_name)s to: %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/vmutils.py:72
-#, python-format
-msgid ""
-"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s"
-" - %(err_desc)s - Error code: %(err_code)d"
-msgstr ""
-
-#: nova/virt/hyperv/vmutils.py:78
-#, python-format
-msgid "WMI job failed with status %(job_state)d. Error details: %(error)s"
-msgstr ""
-
-#: nova/virt/hyperv/vmutils.py:81
-#, python-format
-msgid "WMI job failed with status %(job_state)d. No error description available"
-msgstr ""
-
-#: nova/virt/hyperv/vmutils.py:86
-#, python-format
-msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s"
-msgstr ""
-
-#: nova/virt/hyperv/vmutils.py:93 nova/virt/hyperv/vmutils.py:113
-#, python-format
-msgid "Creating folder %s "
-msgstr ""
-
-#: nova/virt/hyperv/vmutils.py:111
+#: nova/virt/hyperv/vmops.py:292
#, python-format
-msgid "Removing existing folder %s "
+msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:90 nova/virt/xenapi/vm_utils.py:511
+#: nova/virt/hyperv/vmops.py:321
#, python-format
-msgid "block device info: %s"
+msgid "Use CoW image: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:118
+#: nova/virt/hyperv/vmops.py:326
#, python-format
-msgid "Attach boot from volume failed: %s"
+msgid "Creating differencing VHD. Parent: %(parent_path)s, Target: %(target)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:121
+#: nova/virt/hyperv/vmops.py:333
#, python-format
-msgid "Unable to attach boot volume to instance %s"
+msgid "Failed to create a differencing disk from %(parent_path)s to %(target)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:130 nova/virt/xenapi/volumeops.py:115
+#: nova/virt/hyperv/vmutils.py:74
#, python-format
-msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
+msgid "Cannot get VM summary data for: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:155
+#: nova/virt/hyperv/vmutils.py:145
#, python-format
-msgid "Attach volume failed: %s"
+msgid "Creating VM %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:158 nova/virt/xenapi/volumeops.py:182
+#: nova/virt/hyperv/vmutils.py:154
#, python-format
-msgid "Unable to attach volume to instance %s"
+msgid "Setting memory for vm %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:177
+#: nova/virt/hyperv/vmutils.py:157
#, python-format
-msgid "Failed to add volume to VM %s"
+msgid "Set vCPUs for vm %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:191
-#, python-format
-msgid "Detach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
+#: nova/virt/hyperv/vmutils.py:255
+msgid "Controller not found"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:208
+#: nova/virt/hyperv/vmutils.py:318
#, python-format
-msgid "Mounted disk to detach is: %s"
+msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:209
+#: nova/virt/hyperv/vmutils.py:360
#, python-format
-msgid "host_resource disk detached is: %s"
+msgid "Deleting disk file: %(disk)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:212
+#: nova/virt/hyperv/vmutils.py:374
#, python-format
-msgid "Physical disk detached is: %s"
+msgid "Operation failed with return value: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:219
+#: nova/virt/hyperv/vmutils.py:392
#, python-format
-msgid "Failed to remove volume from VM %s"
-msgstr ""
-
-#: nova/virt/hyperv/volumeops.py:228 nova/virt/libvirt/driver.py:624
-msgid "Could not determine iscsi initiator name"
+msgid ""
+"WMI job failed with status %(job_state)d. Error details: %(err_sum_desc)s"
+" - %(err_desc)s - Error code: %(err_code)d"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:245
+#: nova/virt/hyperv/vmutils.py:400
#, python-format
-msgid "device.InitiatorName: %s"
+msgid "WMI job failed with status %(job_state)d. Error details: %(error)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:246
+#: nova/virt/hyperv/vmutils.py:404
#, python-format
-msgid "device.TargetName: %s"
+msgid "WMI job failed with status %(job_state)d. No error description available"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:247
+#: nova/virt/hyperv/vmutils.py:410
#, python-format
-msgid "device.ScsiPortNumber: %s"
+msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:248
+#: nova/virt/hyperv/volumeops.py:74 nova/virt/xenapi/vm_utils.py:496
#, python-format
-msgid "device.ScsiPathId: %s"
+msgid "block device info: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:249
+#: nova/virt/hyperv/volumeops.py:96
#, python-format
-msgid "device.ScsiTargetId): %s"
+msgid "Attach boot from volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:250
+#: nova/virt/hyperv/volumeops.py:99
#, python-format
-msgid "device.ScsiLun: %s"
+msgid "Unable to attach boot volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:251
+#: nova/virt/hyperv/volumeops.py:107
#, python-format
-msgid "device.DeviceInterfaceGuid :%s"
+msgid "Attach_volume: %(connection_info)s to %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:253
+#: nova/virt/hyperv/volumeops.py:128
#, python-format
-msgid "device.DeviceInterfaceName: %s"
+msgid "Attach volume failed: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:255
+#: nova/virt/hyperv/volumeops.py:130 nova/virt/xenapi/volumeops.py:113
#, python-format
-msgid "device.LegacyName: %s"
+msgid "Unable to attach volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:256
+#: nova/virt/hyperv/volumeops.py:139
#, python-format
-msgid "device.DeviceType: %s"
+msgid "Detach_volume: %(connection_info)s from %(instance_name)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:257
+#: nova/virt/hyperv/volumeops.py:148
#, python-format
-msgid "device.DeviceNumber %s"
+msgid "Detaching physical disk from instance: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:258
-#, python-format
-msgid "device.PartitionNumber :%s"
+#: nova/virt/hyperv/volumeops.py:159 nova/virt/libvirt/driver.py:644
+msgid "Could not determine iscsi initiator name"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:264 nova/virt/hyperv/volumeops.py:283
+#: nova/virt/hyperv/volumeops.py:170 nova/virt/hyperv/volumeops.py:184
#, python-format
msgid "Unable to find a mounted disk for target_iqn: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:266
-#, python-format
-msgid "Device number : %s"
-msgstr ""
-
-#: nova/virt/hyperv/volumeops.py:267
+#: nova/virt/hyperv/volumeops.py:173
#, python-format
-msgid "Target lun : %s"
+msgid "Device number: %(device_number)s, target lun: %(target_lun)s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:273 nova/virt/hyperv/volumeops.py:280
+#: nova/virt/hyperv/volumeutils.py:52
#, python-format
-msgid "Mounted disk is: %s"
-msgstr ""
-
-#: nova/virt/hyperv/volumeops.py:297
-#, python-format
-msgid "Drive number to disconnect is: %s"
+msgid "An error has occurred when calling the iscsi initiator: %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:304
+#: nova/virt/libvirt/blockinfo.py:121
#, python-format
-msgid "DeviceNumber : %s"
+msgid "Unable to determine disk prefix for %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:309
+#: nova/virt/libvirt/blockinfo.py:165
#, python-format
-msgid "Disk path to parse: %s"
+msgid "No free disk device names for prefix '%s'"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:311
+#: nova/virt/libvirt/blockinfo.py:179
#, python-format
-msgid "start_device_id: %s"
+msgid "Unsupported virt type %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:313
+#: nova/virt/libvirt/blockinfo.py:204
#, python-format
-msgid "end_device_id: %s"
+msgid "Disk bus %(disk_bus)s is not valid for %(virt)s"
msgstr ""
-#: nova/virt/hyperv/volumeutils.py:51
+#: nova/virt/libvirt/blockinfo.py:254
#, python-format
-msgid "An error has occurred when calling the iscsi initiator: %s"
+msgid "Unable to determine disk bus for '%s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:341
+#: nova/virt/libvirt/driver.py:351
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:347
+#: nova/virt/libvirt/driver.py:357
#, python-format
msgid "Connecting to libvirt: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:368
+#: nova/virt/libvirt/driver.py:378
msgid "Connection to libvirt broke"
msgstr ""
-#: nova/virt/libvirt/driver.py:390 nova/virt/libvirt/driver.py:393
+#: nova/virt/libvirt/driver.py:400 nova/virt/libvirt/driver.py:403
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:411
+#: nova/virt/libvirt/driver.py:421
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:492
+#: nova/virt/libvirt/driver.py:509
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:506
+#: nova/virt/libvirt/driver.py:523
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:511
+#: nova/virt/libvirt/driver.py:528
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:533
+#: nova/virt/libvirt/driver.py:551
msgid "Error from libvirt during undefineFlags. Retrying with undefine"
msgstr ""
-#: nova/virt/libvirt/driver.py:548
+#: nova/virt/libvirt/driver.py:566
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:561
+#: nova/virt/libvirt/driver.py:580
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:578
+#: nova/virt/libvirt/driver.py:598
#, python-format
msgid "Deleting instance files %(target)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:587
+#: nova/virt/libvirt/driver.py:607
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:730
+#: nova/virt/libvirt/driver.py:759
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:837
+#: nova/virt/libvirt/driver.py:852
+msgid "Beginning live snapshot process"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:855
+msgid "Beginning cold snapshot process"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:884
+msgid "Snapshot extracted, beginning image upload"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:896
+msgid "Snapshot image upload complete"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:969
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:841
+#: nova/virt/libvirt/driver.py:973
msgid "Failed to soft reboot instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:1008
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:909
+#: nova/virt/libvirt/driver.py:1016
+msgid "Instance may have been rebooted during soft reboot, so return now."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1058
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1039
+#: nova/virt/libvirt/driver.py:1201
msgid "Instance is running"
msgstr ""
-#: nova/virt/libvirt/driver.py:1046 nova/virt/powervm/operator.py:255
+#: nova/virt/libvirt/driver.py:1208 nova/virt/powervm/operator.py:272
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:1062
+#: nova/virt/libvirt/driver.py:1224
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:1100 nova/virt/libvirt/driver.py:1126
+#: nova/virt/libvirt/driver.py:1261 nova/virt/libvirt/driver.py:1287
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:1115
+#: nova/virt/libvirt/driver.py:1276
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:1164
+#: nova/virt/libvirt/driver.py:1345
#, python-format
msgid "Path '%(path)s' supports direct I/O"
msgstr ""
-#: nova/virt/libvirt/driver.py:1168
+#: nova/virt/libvirt/driver.py:1349
#, python-format
msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1172 nova/virt/libvirt/driver.py:1176
+#: nova/virt/libvirt/driver.py:1353 nova/virt/libvirt/driver.py:1357
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:1241
+#: nova/virt/libvirt/driver.py:1423
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1520
+msgid "Using config drive"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1568
#, python-format
-msgid "Injecting %(injection)s into image %(img_id)s"
+msgid "Injecting %(inj)s into image %(img_id)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1399
+#: nova/virt/libvirt/driver.py:1577
#, python-format
-msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
+msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:1473
+#: nova/virt/libvirt/driver.py:1635
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:1479
+#: nova/virt/libvirt/driver.py:1641
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:1483
+#: nova/virt/libvirt/driver.py:1645
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1487
+#: nova/virt/libvirt/driver.py:1649
#, python-format
msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
msgstr ""
-#: nova/virt/libvirt/driver.py:1503
+#: nova/virt/libvirt/driver.py:1665
msgid ""
"Passthrough of the host CPU was requested but this libvirt version does "
"not support this feature"
msgstr ""
-#: nova/virt/libvirt/driver.py:1819
-msgid "Starting toXML method"
+#: nova/virt/libvirt/driver.py:1966
+#, python-format
+msgid ""
+"Start to_xml instance=%(instance)s network_info=%(network_info)s "
+"disk_info=%(disk_info)s image_meta=%(image_meta)s "
+"rescue=%(rescue)sblock_device_info=%(block_device_info)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1823
-msgid "Finished toXML method"
+#: nova/virt/libvirt/driver.py:1982
+#, python-format
+msgid "End to_xml instance=%(instance)s xml=%(xml)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1840
+#: nova/virt/libvirt/driver.py:1999
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1994
+#: nova/virt/libvirt/driver.py:2166
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:2111
+#: nova/virt/libvirt/driver.py:2220
+#, python-format
+msgid "List of domains returned by libVirt: %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2222
+#, python-format
+msgid "libVirt can't find a domain with id: %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2304
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2194
+#: nova/virt/libvirt/driver.py:2387
#, python-format
msgid "Trying to get stats for the volume %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2218
+#: nova/virt/libvirt/driver.py:2411
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2222
+#: nova/virt/libvirt/driver.py:2415
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:2338
+#: nova/virt/libvirt/driver.py:2531
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2346
+#: nova/virt/libvirt/driver.py:2539
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2383
+#: nova/virt/libvirt/driver.py:2576
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2408
+#: nova/virt/libvirt/driver.py:2601
#, python-format
msgid ""
"Instance launched has CPU info:\n"
"%s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2420
+#: nova/virt/libvirt/driver.py:2613
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -7061,87 +7361,99 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2437
+#: nova/virt/libvirt/driver.py:2630
#, python-format
msgid ""
"Creating tmpfile %s to notify to other compute nodes that they should "
"mount the same storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:2485
+#: nova/virt/libvirt/driver.py:2678
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:2557
+#: nova/virt/libvirt/driver.py:2750
#, python-format
msgid "Live Migration failure: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2647
+#: nova/virt/libvirt/driver.py:2843
#, python-format
msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:2762
+#: nova/virt/libvirt/driver.py:2953
+#, python-format
+msgid ""
+"Error from libvirt while getting description of %(instance_name)s: [Error"
+" Code %(error_code)s] %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2970
#, python-format
msgid "skipping %(path)s since it looks like volume"
msgstr ""
-#: nova/virt/libvirt/driver.py:2811
+#: nova/virt/libvirt/driver.py:3019
#, python-format
msgid "Getting disk size of %(i_name)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2860
+#: nova/virt/libvirt/driver.py:3067
msgid "Starting migrate_disk_and_power_off"
msgstr ""
-#: nova/virt/libvirt/driver.py:2919
+#: nova/virt/libvirt/driver.py:3126
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2926
+#: nova/virt/libvirt/driver.py:3132
msgid "Starting finish_migration"
msgstr ""
-#: nova/virt/libvirt/driver.py:2977
+#: nova/virt/libvirt/driver.py:3188
msgid "Starting finish_revert_migration"
msgstr ""
-#: nova/virt/libvirt/firewall.py:35
+#: nova/virt/libvirt/driver.py:3301
+#, python-format
+msgid "Checking instance files accessability%(instance_path)s"
+msgstr ""
+
+#: nova/virt/libvirt/firewall.py:51
msgid ""
"Libvirt module could not be loaded. NWFilterFirewall will not work "
"correctly."
msgstr ""
-#: nova/virt/libvirt/firewall.py:104
+#: nova/virt/libvirt/firewall.py:107
msgid "Called setup_basic_filtering in nwfilter"
msgstr ""
-#: nova/virt/libvirt/firewall.py:112
+#: nova/virt/libvirt/firewall.py:115
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:193
+#: nova/virt/libvirt/firewall.py:209
#, python-format
msgid "The nwfilter(%(instance_filter_name)s) is not found."
msgstr ""
-#: nova/virt/libvirt/firewall.py:216
+#: nova/virt/libvirt/firewall.py:232
#, python-format
msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
msgstr ""
-#: nova/virt/libvirt/firewall.py:232
+#: nova/virt/libvirt/firewall.py:248
msgid "iptables firewall: Setup Basic Filtering"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:207
+#: nova/virt/libvirt/imagebackend.py:232
msgid "You should specify libvirt_images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:270
+#: nova/virt/libvirt/imagebackend.py:292
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
@@ -7287,19 +7599,14 @@ msgstr ""
msgid "Verification complete"
msgstr ""
-#: nova/virt/libvirt/snapshots.py:83 nova/virt/libvirt/snapshots.py:86
-#: nova/virt/libvirt/snapshots.py:89
-msgid "LVM snapshots not implemented"
-msgstr ""
-
-#: nova/virt/libvirt/utils.py:113
+#: nova/virt/libvirt/utils.py:124
#, python-format
msgid ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
"available, but %(size)db required by volume %(lv)s."
msgstr ""
-#: nova/virt/libvirt/utils.py:122
+#: nova/virt/libvirt/utils.py:133
#, python-format
msgid ""
"Volume group %(vg)s will not be able to hold sparse volume %(lv)s. "
@@ -7307,51 +7614,83 @@ msgid ""
"%(free_space)db."
msgstr ""
-#: nova/virt/libvirt/utils.py:169
+#: nova/virt/libvirt/utils.py:183
+#, python-format
+msgid "vg %s must be LVM volume group"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:210
#, python-format
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/utils.py:435
+#: nova/virt/libvirt/utils.py:483
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/vif.py:110
+#: nova/virt/libvirt/vif.py:231 nova/virt/libvirt/vif.py:342
+#: nova/virt/libvirt/vif.py:429
+#, python-format
+msgid ""
+"vif_type=%(vif_type)s instance=%(instance)s network=%(network)s "
+"mapping=%(mapping)s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:237 nova/virt/libvirt/vif.py:348
+#: nova/virt/libvirt/vif.py:435
+msgid "vif_type parameter must be present for this vif_driver implementation"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:250 nova/virt/libvirt/vif.py:361
+#: nova/virt/libvirt/vif.py:448
+#, python-format
+msgid "Unexpected vif_type=%s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:262
#, python-format
msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
msgstr ""
-#: nova/virt/libvirt/vif.py:120
+#: nova/virt/libvirt/vif.py:272
#, python-format
msgid "Ensuring bridge %s"
msgstr ""
-#: nova/virt/libvirt/vif.py:197 nova/virt/libvirt/vif.py:268
+#: nova/virt/libvirt/vif.py:378 nova/virt/libvirt/vif.py:407
msgid "Failed while unplugging vif"
msgstr ""
-#: nova/virt/libvirt/volume.py:188
+#: nova/virt/libvirt/volume.py:221
#, python-format
msgid "iSCSI device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:191
+#: nova/virt/libvirt/volume.py:224
#, python-format
msgid ""
-"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. "
-"Try number: %(tries)s"
+"ISCSI volume not yet found at: %(disk_dev)s. Will rescan & retry. Try "
+"number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:203
+#: nova/virt/libvirt/volume.py:236
#, python-format
-msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
+msgid "Found iSCSI node %(disk_dev)s (after %(tries)s rescans)"
msgstr ""
-#: nova/virt/libvirt/volume_nfs.py:81
+#: nova/virt/libvirt/volume.py:309
#, python-format
msgid "%s is already mounted"
msgstr ""
+#: nova/virt/libvirt/volume_nfs.py:36
+msgid ""
+"The nova.virt.libvirt.volume_nfs.NfsVolumeDriver class is deprecated and "
+"will be removed in the Hxxxx release. Please update nova.conf so that the"
+" 'libvirt_volume_drivers' parameter refers to "
+"nova.virt.libvirt.volume.LibvirtNFSVolumeDriver."
+msgstr ""
+
#: nova/virt/powervm/blockdev.py:73
#, python-format
msgid "Removing the logical volume '%s'"
@@ -7390,24 +7729,33 @@ msgstr ""
msgid "Error while attempting cleanup of failed deploy to logical volume."
msgstr ""
-#: nova/virt/powervm/blockdev.py:174
+#: nova/virt/powervm/blockdev.py:158
+msgid "Snapshot added to glance."
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:164
+#, python-format
+msgid "Failed to clean up snapshot file %(snapshot_file_path)s"
+msgstr ""
+
+#: nova/virt/powervm/blockdev.py:210
msgid "Could not create logical volume. No space left on any volume group."
msgstr ""
-#: nova/virt/powervm/blockdev.py:245
+#: nova/virt/powervm/blockdev.py:298 nova/virt/powervm/blockdev.py:370
msgid "Unable to get checksum"
msgstr ""
-#: nova/virt/powervm/blockdev.py:248
+#: nova/virt/powervm/blockdev.py:301 nova/virt/powervm/blockdev.py:393
msgid "Image checksums do not match"
msgstr ""
-#: nova/virt/powervm/blockdev.py:269
+#: nova/virt/powervm/blockdev.py:322
#, python-format
msgid "Image found on host at '%s'"
msgstr ""
-#: nova/virt/powervm/blockdev.py:277
+#: nova/virt/powervm/blockdev.py:330
msgid "Uncompressed image file not found"
msgstr ""
@@ -7415,153 +7763,189 @@ msgstr ""
msgid "Connection error connecting PowerVM manager"
msgstr ""
-#: nova/virt/powervm/common.py:111
+#: nova/virt/powervm/common.py:66
+#, python-format
+msgid "Running cmd (SSH-as-root): %s"
+msgstr ""
+
+#: nova/virt/powervm/common.py:112
msgid "File transfer to PowerVM manager failed"
msgstr ""
+#: nova/virt/powervm/common.py:131
+#, python-format
+msgid "ftp GET %(remote_path)s to: %(local_path)s"
+msgstr ""
+
+#: nova/virt/powervm/common.py:137
+msgid "File transfer from PowerVM manager failed"
+msgstr ""
+
+#: nova/virt/powervm/driver.py:160
+#, python-format
+msgid "%(inst_name)s captured in %(snapshot_time)s seconds"
+msgstr ""
+
#: nova/virt/powervm/exception.py:21
msgid "Connection to PowerVM manager failed"
msgstr ""
#: nova/virt/powervm/exception.py:25
-msgid "File '%(file_path)' transfer to PowerVM manager failed"
+#, python-format
+msgid "File '%(file_path)s' transfer to PowerVM manager failed"
msgstr ""
#: nova/virt/powervm/exception.py:29
#, python-format
-msgid "LPAR instance '%(instance_name)s' could not be found"
+msgid "FTP %(ftp_cmd)s from %(source_path)s to %(dest_path)s failed"
msgstr ""
#: nova/virt/powervm/exception.py:33
#, python-format
-msgid "LPAR instance '%(instance_name)s' creation failed"
+msgid "LPAR instance '%(instance_name)s' could not be found"
msgstr ""
#: nova/virt/powervm/exception.py:37
+#, python-format
+msgid "LPAR instance '%(instance_name)s' creation failed"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:41
msgid "No space left on any volume group"
msgstr ""
-#: nova/virt/powervm/exception.py:45
+#: nova/virt/powervm/exception.py:49
#, python-format
msgid "Operation '%(operation)s' on LPAR '%(instance_name)s' timed out"
msgstr ""
-#: nova/virt/powervm/exception.py:50
+#: nova/virt/powervm/exception.py:54
msgid "Image creation failed on PowerVM"
msgstr ""
-#: nova/virt/powervm/exception.py:54
+#: nova/virt/powervm/exception.py:58
#, python-format
msgid ""
"Insufficient free memory on PowerVM system to spawn instance "
"'%(instance_name)s'"
msgstr ""
-#: nova/virt/powervm/exception.py:59
+#: nova/virt/powervm/exception.py:63
#, python-format
msgid ""
"Insufficient available CPUs on PowerVM system to spawn instance "
"'%(instance_name)s'"
msgstr ""
-#: nova/virt/powervm/exception.py:64
+#: nova/virt/powervm/exception.py:68
#, python-format
msgid "PowerVM LPAR instance '%(instance_name)s' cleanup failed"
msgstr ""
-#: nova/virt/powervm/operator.py:100
+#: nova/virt/powervm/operator.py:101
#, python-format
msgid "LPAR instance '%s' not found"
msgstr ""
-#: nova/virt/powervm/operator.py:184
+#: nova/virt/powervm/operator.py:185
msgid "Not enough free memory in the host"
msgstr ""
-#: nova/virt/powervm/operator.py:194
+#: nova/virt/powervm/operator.py:195
msgid "Insufficient available CPU on PowerVM"
msgstr ""
-#: nova/virt/powervm/operator.py:218
+#: nova/virt/powervm/operator.py:233
#, python-format
msgid "Creating LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:221
+#: nova/virt/powervm/operator.py:238
#, python-format
msgid "LPAR instance '%s' creation failed"
msgstr ""
-#: nova/virt/powervm/operator.py:238
+#: nova/virt/powervm/operator.py:255
#, python-format
msgid "PowerVM image creation failed: %s"
msgstr ""
-#: nova/virt/powervm/operator.py:246
+#: nova/virt/powervm/operator.py:263
#, python-format
msgid "Activating the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:260
+#: nova/virt/powervm/operator.py:277
#, python-format
msgid "Instance '%s' failed to boot"
msgstr ""
-#: nova/virt/powervm/operator.py:272
+#: nova/virt/powervm/operator.py:289
msgid "Error while attempting to clean up failed instance launch."
msgstr ""
-#: nova/virt/powervm/operator.py:276
+#: nova/virt/powervm/operator.py:293
#, python-format
msgid "Instance spawned in %s seconds"
msgstr ""
-#: nova/virt/powervm/operator.py:287
+#: nova/virt/powervm/operator.py:304
#, python-format
msgid "During destroy, LPAR instance '%s' was not found on PowerVM system."
msgstr ""
-#: nova/virt/powervm/operator.py:296
+#: nova/virt/powervm/operator.py:320
+#, python-format
+msgid "Stopping instance %s for snapshot."
+msgstr ""
+
+#: nova/virt/powervm/operator.py:343
#, python-format
msgid "Shutting down the instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:305
+#: nova/virt/powervm/operator.py:352
#, python-format
msgid "Deleting the LPAR instance '%s'"
msgstr ""
-#: nova/virt/powervm/operator.py:308
+#: nova/virt/powervm/operator.py:355
msgid "PowerVM instance cleanup failed"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:107
+#: nova/virt/vmwareapi/driver.py:137
msgid ""
"Must specify vmwareapi_host_ip,vmwareapi_host_username and "
-"vmwareapi_host_password to usecompute_driver=vmwareapi.VMwareESXDriver"
+"vmwareapi_host_password to usecompute_driver=vmwareapi.VMwareESXDriver or"
+" vmwareapi.VMwareVCDriver"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:355
+#, python-format
+msgid "VMware Cluster %s is not found"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:258
+#: nova/virt/vmwareapi/driver.py:452
#, python-format
msgid "In vmwareapi:_create_session, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:341
+#: nova/virt/vmwareapi/driver.py:535
#, python-format
msgid "In vmwareapi:_call_method, got this exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:376
+#: nova/virt/vmwareapi/driver.py:570
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: success"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:381
+#: nova/virt/vmwareapi/driver.py:575
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:385
+#: nova/virt/vmwareapi/driver.py:579
#, python-format
msgid "In vmwareapi:_poll_task, Got this error %s"
msgstr ""
@@ -7571,64 +7955,74 @@ msgstr ""
msgid "Error(s) %s occurred in the call to RetrieveProperties"
msgstr ""
-#: nova/virt/vmwareapi/fake.py:44 nova/virt/xenapi/fake.py:77
+#: nova/virt/vmwareapi/fake.py:45 nova/virt/xenapi/fake.py:77
#, python-format
msgid "%(text)s: _db_content => %(content)s"
msgstr ""
-#: nova/virt/vmwareapi/fake.py:131
+#: nova/virt/vmwareapi/fake.py:132
#, python-format
msgid "Property %(attr)s not set for the managed object %(name)s"
msgstr ""
-#: nova/virt/vmwareapi/fake.py:436
+#: nova/virt/vmwareapi/fake.py:486
msgid "There is no VM registered"
msgstr ""
-#: nova/virt/vmwareapi/fake.py:438 nova/virt/vmwareapi/fake.py:608
+#: nova/virt/vmwareapi/fake.py:488 nova/virt/vmwareapi/fake.py:663
#, python-format
msgid "Virtual Machine with ref %s is not there"
msgstr ""
-#: nova/virt/vmwareapi/fake.py:501
+#: nova/virt/vmwareapi/fake.py:551
#, python-format
msgid "Logging out a session that is invalid or already logged out: %s"
msgstr ""
-#: nova/virt/vmwareapi/fake.py:516
+#: nova/virt/vmwareapi/fake.py:566
msgid "Session is faulty"
msgstr ""
-#: nova/virt/vmwareapi/fake.py:519
+#: nova/virt/vmwareapi/fake.py:569
msgid "Session Invalid"
msgstr ""
-#: nova/virt/vmwareapi/fake.py:605
-msgid " No Virtual Machine has been registered yet"
+#: nova/virt/vmwareapi/fake.py:660
+msgid "No Virtual Machine has been registered yet"
msgstr ""
-#: nova/virt/vmwareapi/io_util.py:103
+#: nova/virt/vmwareapi/host.py:40
+#, python-format
+msgid "%(action)s %(host)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/host.py:63
+#, python-format
+msgid "Set maintenance mod on %(host)s to %(mode)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/io_util.py:112
#, python-format
msgid "Glance image %s is in killed state"
msgstr ""
-#: nova/virt/vmwareapi/io_util.py:111
+#: nova/virt/vmwareapi/io_util.py:120
#, python-format
msgid "Glance image %(image_id)s is in unknown state - %(state)s"
msgstr ""
-#: nova/virt/vmwareapi/network_utils.py:128
+#: nova/virt/vmwareapi/network_util.py:138
msgid ""
"ESX SOAP server returned an empty port group for the host system in its "
"response"
msgstr ""
-#: nova/virt/vmwareapi/network_utils.py:155
+#: nova/virt/vmwareapi/network_util.py:164
#, python-format
msgid "Creating Port Group with name %s on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/network_utils.py:169
+#: nova/virt/vmwareapi/network_util.py:178
#, python-format
msgid "Created Port Group with name %s on the ESX host"
msgstr ""
@@ -7638,274 +8032,383 @@ msgstr ""
msgid "Exception during HTTP connection close in VMwareHTTPWrite. Exception is %s"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:83
+#: nova/virt/vmwareapi/vim.py:84
msgid "Unable to import suds."
msgstr ""
-#: nova/virt/vmwareapi/vim.py:89
+#: nova/virt/vmwareapi/vim.py:90
msgid "Must specify vmwareapi_wsdl_loc"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:144
+#: nova/virt/vmwareapi/vim.py:141
#, python-format
msgid "No such SOAP method '%s' provided by VI SDK"
msgstr ""
-#: nova/virt/vmwareapi/vim.py:149
+#: nova/virt/vmwareapi/vim.py:146
#, python-format
msgid "httplib error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:156
+#: nova/virt/vmwareapi/vim.py:153
#, python-format
msgid "Socket error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:161
+#: nova/virt/vmwareapi/vim.py:158
#, python-format
msgid "Type error in %s: "
msgstr ""
-#: nova/virt/vmwareapi/vim.py:165
+#: nova/virt/vmwareapi/vim.py:162
#, python-format
msgid "Exception in %s "
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:60
+#: nova/virt/vmwareapi/vmops.py:94
msgid "Getting list of instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:76
+#: nova/virt/vmwareapi/vmops.py:110
#, python-format
msgid "Got total of %s instances"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:120
-msgid "Couldn't get a local Datastore reference"
-msgstr ""
-
-#: nova/virt/vmwareapi/vmops.py:190
-msgid "Creating VM on the ESX host"
+#: nova/virt/vmwareapi/vmops.py:204
+msgid "Creating VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:198
-msgid "Created VM on the ESX host"
+#: nova/virt/vmwareapi/vmops.py:212
+msgid "Created VM on the ESX host"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:226
+#: nova/virt/vmwareapi/vmops.py:236
#, python-format
msgid ""
"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter "
-"type %(adapter_type)s on the ESX host local store %(data_store_name)s"
+"type %(adapter_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:244
+#: nova/virt/vmwareapi/vmops.py:255
#, python-format
msgid ""
-"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host "
-"local store %(data_store_name)s"
+"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB and type "
+"%(disk_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:254
+#: nova/virt/vmwareapi/vmops.py:264
#, python-format
msgid ""
-"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore "
+"Deleting the file %(vmdk_path)s on the ESX host localstore "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:267
+#: nova/virt/vmwareapi/vmops.py:278
#, python-format
msgid ""
-"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store "
+"Deleted the file %(vmdk_path)s on the ESX host local store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:279
+#: nova/virt/vmwareapi/vmops.py:286
#, python-format
msgid ""
"Downloading image file data %(image_ref)s to the ESX data store "
"%(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:295
+#: nova/virt/vmwareapi/vmops.py:306
#, python-format
msgid ""
-"Downloaded image file data %(image_ref)s to the ESX data store "
-"%(data_store_name)s"
+"Downloaded image file data %(image_ref)s to %(upload_vmdk_name)s on the "
+"ESX data store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:313
-msgid "Reconfiguring VM instance to attach the image disk"
+#: nova/virt/vmwareapi/vmops.py:320
+#, python-format
+msgid ""
+"Copying Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter type"
+" %(adapter_type)s on the ESX host local store %(data_store_name)s to disk"
+" type %(disk_type)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:320
-msgid "Reconfigured VM instance to attach the image disk"
+#: nova/virt/vmwareapi/vmops.py:341
+#, python-format
+msgid ""
+"Copied Virtual Disk of size %(vmdk_file_size_in_kb)s KB and type "
+"%(disk_type)s on the ESX host local store %(data_store_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:327
+#: nova/virt/vmwareapi/vmops.py:420
msgid "Powering on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:333
+#: nova/virt/vmwareapi/vmops.py:426
msgid "Powered on the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:379
+#: nova/virt/vmwareapi/vmops.py:472
msgid "Creating Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:389
+#: nova/virt/vmwareapi/vmops.py:482
msgid "Created Snapshot of the VM instance"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:432
+#: nova/virt/vmwareapi/vmops.py:527
msgid "Copying disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:445
+#: nova/virt/vmwareapi/vmops.py:540
msgid "Copied disk data before snapshot of the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:454
+#: nova/virt/vmwareapi/vmops.py:549
#, python-format
msgid "Uploading image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:468
+#: nova/virt/vmwareapi/vmops.py:563
#, python-format
msgid "Uploaded image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:479
+#: nova/virt/vmwareapi/vmops.py:576
#, python-format
msgid "Deleting temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:488
+#: nova/virt/vmwareapi/vmops.py:585
#, python-format
msgid "Deleted temporary vmdk file %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:520
+#: nova/virt/vmwareapi/vmops.py:617
msgid "instance is not powered on"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:527
+#: nova/virt/vmwareapi/vmops.py:624
msgid "Rebooting guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:530
+#: nova/virt/vmwareapi/vmops.py:627
msgid "Rebooted guest OS of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:532
+#: nova/virt/vmwareapi/vmops.py:629
msgid "Doing hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:536
+#: nova/virt/vmwareapi/vmops.py:633
msgid "Did hard reboot of VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:548
+#: nova/virt/vmwareapi/vmops.py:645 nova/virt/vmwareapi/vmops.py:677
+#: nova/virt/vmwareapi/vmops.py:970
msgid "instance not present"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:567
+#: nova/virt/vmwareapi/vmops.py:651 nova/virt/vmwareapi/vmops.py:974
+msgid "Destroying the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:656 nova/virt/vmwareapi/vmops.py:979
+msgid "Destroyed the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:658
+#, python-format
+msgid "In vmwareapi:vmops:delete, got this exception while destroying the VM: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:696 nova/virt/vmwareapi/vmops.py:849
msgid "Powering off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:572
+#: nova/virt/vmwareapi/vmops.py:701 nova/virt/vmwareapi/vmops.py:854
msgid "Powered off the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:576
+#: nova/virt/vmwareapi/vmops.py:705
msgid "Unregistering the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:579
+#: nova/virt/vmwareapi/vmops.py:708
msgid "Unregistered the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:581
+#: nova/virt/vmwareapi/vmops.py:710
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while un-registering the "
"VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:593
+#: nova/virt/vmwareapi/vmops.py:723
#, python-format
msgid "Deleting contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:603
+#: nova/virt/vmwareapi/vmops.py:735
#, python-format
msgid "Deleted contents of the VM from datastore %(datastore_name)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:608
+#: nova/virt/vmwareapi/vmops.py:740
#, python-format
msgid ""
"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
"contents from the disk: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:617
+#: nova/virt/vmwareapi/vmops.py:748
msgid "pause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:621
+#: nova/virt/vmwareapi/vmops.py:752
msgid "unpause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:635
+#: nova/virt/vmwareapi/vmops.py:766
msgid "Suspending the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:639
+#: nova/virt/vmwareapi/vmops.py:770
msgid "Suspended the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:642
-msgid "instance is powered off and can not be suspended."
+#: nova/virt/vmwareapi/vmops.py:773
+msgid "instance is powered off and cannot be suspended."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:645
+#: nova/virt/vmwareapi/vmops.py:776
msgid "VM was already in suspended state. So returning without doing anything"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:658
+#: nova/virt/vmwareapi/vmops.py:789
msgid "Resuming the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:663
+#: nova/virt/vmwareapi/vmops.py:794
msgid "Resumed the VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:665
+#: nova/virt/vmwareapi/vmops.py:796
msgid "instance is not in a suspended state"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:701
+#: nova/virt/vmwareapi/vmops.py:857
+msgid "instance is suspended and cannot be powered off."
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:860
+msgid "VM was already in powered off state. So returning without doing anything"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:873
+msgid "VM was already in powered on state. So returning without doing anything"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:877
+msgid "Powering on the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:882
+msgid "Powered on the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:899
+#, python-format
+msgid "Updating instance '%(instance_uuid)s' progress to %(progress)d"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:930
+#, python-format
+msgid "Renaming the VM to %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:936
+#, python-format
+msgid "Renamed the VM to %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:951
+#, python-format
+msgid "Cloning VM to host %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:959
+#, python-format
+msgid "Cloned VM to host %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:981
+#, python-format
+msgid ""
+"In vmwareapi:vmops:confirm_migration, got this exception while destroying"
+" the VM: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:996
+#, python-format
+msgid "Renaming the VM from %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1002
+#, python-format
+msgid "Renamed the VM from %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1025
+#, python-format
+msgid "Migrating VM to host %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1037
+#, python-format
+msgid "Migrated VM to host %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1047 nova/virt/xenapi/vmops.py:1263
+#, python-format
+msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1051
+#, python-format
+msgid "Automatically hard rebooting %d"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1087
msgid "get_diagnostics not implemented for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:759
+#: nova/virt/vmwareapi/vmops.py:1163
#, python-format
msgid "Reconfiguring VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:767
+#: nova/virt/vmwareapi/vmops.py:1170
#, python-format
msgid "Reconfigured VM instance to set the machine id with ip - %(ip_addr)s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:804
+#: nova/virt/vmwareapi/vmops.py:1185
+#, python-format
+msgid "Reconfiguring VM instance to enable vnc on port - %(port)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1192
+#, python-format
+msgid "Reconfigured VM instance to enable vnc on port - %(port)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1285
#, python-format
msgid "Creating directory with path %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:808
+#: nova/virt/vmwareapi/vmops.py:1291
#, python-format
msgid "Created directory with path %s"
msgstr ""
@@ -7915,161 +8418,247 @@ msgstr ""
msgid "Downloading image %s from glance image server"
msgstr ""
-#: nova/virt/vmwareapi/vmware_images.py:108
+#: nova/virt/vmwareapi/vmware_images.py:107
#, python-format
msgid "Downloaded image %s from glance image server"
msgstr ""
-#: nova/virt/vmwareapi/vmware_images.py:114
+#: nova/virt/vmwareapi/vmware_images.py:113
#, python-format
msgid "Uploading image %s to the Glance image server"
msgstr ""
-#: nova/virt/vmwareapi/vmware_images.py:137
+#: nova/virt/vmwareapi/vmware_images.py:135
#, python-format
msgid "Uploaded image %s to the Glance image server"
msgstr ""
-#: nova/virt/vmwareapi/vmware_images.py:148
+#: nova/virt/vmwareapi/vmware_images.py:146
#, python-format
msgid "Getting image size for the image %s"
msgstr ""
-#: nova/virt/vmwareapi/vmware_images.py:153
+#: nova/virt/vmwareapi/vmware_images.py:151
#, python-format
msgid "Got image size of %(size)s for the image %(image)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1476
+#: nova/virt/vmwareapi/volume_util.py:163
+#, python-format
+msgid "Rescanning HBA %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volume_util.py:166
+#, python-format
+msgid "Rescanned HBA %s "
+msgstr ""
+
+#: nova/virt/vmwareapi/volume_util.py:178 nova/virt/xenapi/volume_utils.py:348
+#, python-format
+msgid "Mountpoint cannot be translated: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:60
+#, python-format
+msgid ""
+"Reconfiguring VM instance %(instance_name)s to attach disk %(vmdk_path)s "
+"or device %(device_name)s with type %(disk_type)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:68
+#, python-format
+msgid ""
+"Reconfigured VM instance %(instance_name)s to attach disk %(vmdk_path)s "
+"or device %(device_name)s with type %(disk_type)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:80
+#, python-format
+msgid "Reconfiguring VM instance %(instance_name)s to detach disk %(disk_key)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:87
+#, python-format
+msgid "Reconfigured VM instance %(instance_name)s to detach disk %(disk_key)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:94
+#, python-format
+msgid "Discovering iSCSI target %(target_iqn)s from %(target_portal)s."
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:99
+msgid "Storage target found. No need to discover"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:107
+#, python-format
+msgid "Discovered iSCSI target %(target_iqn)s from %(target_portal)s."
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:110
+#, python-format
+msgid "Unable to discovered iSCSI target %(target_iqn)s from %(target_portal)s."
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:130 nova/virt/xenapi/volumeops.py:45
+#, python-format
+msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:141 nova/virt/vmwareapi/volumeops.py:180
+msgid "Unable to find iSCSI Target"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:159 nova/virt/xenapi/volumeops.py:58
+#, python-format
+msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:169 nova/virt/xenapi/volumeops.py:118
+#, python-format
+msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:188
+msgid "Unable to find volume"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:190 nova/virt/xenapi/volumeops.py:131
+#, python-format
+msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:87 nova/virt/xenapi/vmops.py:1506
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:88 nova/virt/xenapi/vmops.py:1480
+#: nova/virt/xenapi/agent.py:91 nova/virt/xenapi/vmops.py:1510
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:93 nova/virt/xenapi/vmops.py:1485
+#: nova/virt/xenapi/agent.py:96 nova/virt/xenapi/vmops.py:1515
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:103
+#: nova/virt/xenapi/agent.py:106
#, python-format
msgid ""
"The agent call to %(method)s returned an invalid response: %(ret)r. "
"path=%(path)s; args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:113
+#: nova/virt/xenapi/agent.py:116
#, python-format
msgid "Failed to query agent version: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:131
+#: nova/virt/xenapi/agent.py:135
msgid "Querying agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:145
+#: nova/virt/xenapi/agent.py:149
msgid "Reached maximum time attempting to query agent version"
msgstr ""
-#: nova/virt/xenapi/agent.py:153
+#: nova/virt/xenapi/agent.py:157
#, python-format
msgid "Updating agent to %s"
msgstr ""
-#: nova/virt/xenapi/agent.py:161
+#: nova/virt/xenapi/agent.py:165
#, python-format
msgid "Failed to update agent: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:175
+#: nova/virt/xenapi/agent.py:179
msgid "Setting admin password"
msgstr ""
-#: nova/virt/xenapi/agent.py:186
+#: nova/virt/xenapi/agent.py:190
#, python-format
msgid "Failed to exchange keys: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:206
+#: nova/virt/xenapi/agent.py:210
#, python-format
msgid "Failed to update password: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:213
+#: nova/virt/xenapi/agent.py:227
#, python-format
msgid "Injecting file path: %r"
msgstr ""
-#: nova/virt/xenapi/agent.py:226
+#: nova/virt/xenapi/agent.py:240
#, python-format
msgid "Failed to inject file: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:233
+#: nova/virt/xenapi/agent.py:247
msgid "Resetting network"
msgstr ""
-#: nova/virt/xenapi/agent.py:239
+#: nova/virt/xenapi/agent.py:253
#, python-format
msgid "Failed to reset network: %(resp)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:262
+#: nova/virt/xenapi/agent.py:276
msgid ""
"XenServer tools installed in this image are capable of network injection."
" Networking files will not bemanipulated"
msgstr ""
-#: nova/virt/xenapi/agent.py:270
+#: nova/virt/xenapi/agent.py:284
msgid ""
"XenServer tools are present in this image but are not capable of network "
"injection"
msgstr ""
-#: nova/virt/xenapi/agent.py:274
+#: nova/virt/xenapi/agent.py:288
msgid "XenServer tools are not installed in this image"
msgstr ""
-#: nova/virt/xenapi/agent.py:326
+#: nova/virt/xenapi/agent.py:340
#, python-format
msgid "OpenSSL error: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:133
+#: nova/virt/xenapi/driver.py:134
msgid ""
"Must specify xenapi_connection_url, xenapi_connection_username "
"(optionally), and xenapi_connection_password to use "
"compute_driver=xenapi.XenAPIDriver"
msgstr ""
-#: nova/virt/xenapi/driver.py:160
+#: nova/virt/xenapi/driver.py:161
msgid "Failure while cleaning up attached VDIs"
msgstr ""
-#: nova/virt/xenapi/driver.py:344
+#: nova/virt/xenapi/driver.py:347
#, python-format
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:556
+#: nova/virt/xenapi/driver.py:560
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/driver.py:616
+#: nova/virt/xenapi/driver.py:620
msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
msgstr ""
-#: nova/virt/xenapi/driver.py:656
+#: nova/virt/xenapi/driver.py:660
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/virt/xenapi/driver.py:740 nova/virt/xenapi/driver.py:754
+#: nova/virt/xenapi/driver.py:744 nova/virt/xenapi/driver.py:758
#, python-format
msgid "Got exception: %s"
msgstr ""
@@ -8283,7 +8872,7 @@ msgid ""
" on %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:472
+#: nova/virt/xenapi/vm_utils.py:471
msgid "SR not present and could not be introduced"
msgstr ""
@@ -8297,288 +8886,283 @@ msgstr ""
msgid "No primary VDI found for %(vm_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:611
+#: nova/virt/xenapi/vm_utils.py:615
msgid "Starting snapshot for VM"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:659
+#: nova/virt/xenapi/vm_utils.py:665
#, python-format
msgid "Destroying cached VDI '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:717
-#, python-format
-msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
-msgstr ""
-
-#: nova/virt/xenapi/vm_utils.py:903
+#: nova/virt/xenapi/vm_utils.py:912
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
"this system was found to be of type %(sr_type)s. Ignoring the cow flag."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:959
+#: nova/virt/xenapi/vm_utils.py:968
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:993
+#: nova/virt/xenapi/vm_utils.py:1002
#, python-format
msgid "Fetched VDIs of type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1005
+#: nova/virt/xenapi/vm_utils.py:1014
#, python-format
msgid ""
"download_vhd %(image_id)s, attempt %(attempt_num)d/%(max_attempts)d, "
"params: %(params)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1018
+#: nova/virt/xenapi/vm_utils.py:1027
#, python-format
msgid "download_vhd failed: %r"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1052
+#: nova/virt/xenapi/vm_utils.py:1061
#, python-format
msgid "Invalid value '%s' for xenapi_torrent_images"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1063
+#: nova/virt/xenapi/vm_utils.py:1072
#, python-format
msgid "Asking xapi to fetch vhd image %(image_id)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1127
+#: nova/virt/xenapi/vm_utils.py:1136
#, python-format
msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1142
+#: nova/virt/xenapi/vm_utils.py:1151
#, python-format
msgid "image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1146
+#: nova/virt/xenapi/vm_utils.py:1155
#, python-format
msgid ""
"Image size %(size_bytes)d exceeded instance_type allowed size "
"%(allowed_size_bytes)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1168
+#: nova/virt/xenapi/vm_utils.py:1177
#, python-format
msgid "Fetching image %(image_id)s, type %(image_type_str)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1181
+#: nova/virt/xenapi/vm_utils.py:1190
#, python-format
msgid "Size for image %(image_id)s: %(virtual_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1190
+#: nova/virt/xenapi/vm_utils.py:1199
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
"bytes"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1209
+#: nova/virt/xenapi/vm_utils.py:1218
#, python-format
msgid "Copying VDI %s to /boot/guest on dom0"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1223
+#: nova/virt/xenapi/vm_utils.py:1232
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1232
+#: nova/virt/xenapi/vm_utils.py:1241
msgid "Failed to fetch glance image"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1273
+#: nova/virt/xenapi/vm_utils.py:1282
#, python-format
msgid "Detected %(image_type_str)s format for image %(image_ref)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1304
+#: nova/virt/xenapi/vm_utils.py:1313
#, python-format
msgid "Looking up vdi %s for PV kernel"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1322
+#: nova/virt/xenapi/vm_utils.py:1331
#, python-format
msgid ""
"Image format is None: trying to determine PV status using pygrub; if "
"instance with vdi %s does not boot correctly, try with image metadata."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1328
+#: nova/virt/xenapi/vm_utils.py:1337
#, python-format
msgid "Unknown image format %(disk_image_type)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1359
+#: nova/virt/xenapi/vm_utils.py:1368
#, python-format
msgid "VDI %s is still available"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1455
+#: nova/virt/xenapi/vm_utils.py:1464
#, python-format
msgid "Unable to parse rrd of %(vm_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1482
+#: nova/virt/xenapi/vm_utils.py:1491
#, python-format
msgid "Re-scanning SR %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1510
+#: nova/virt/xenapi/vm_utils.py:1519
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1528
+#: nova/virt/xenapi/vm_utils.py:1537
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration and/or configure the flag "
"'sr_matching_filter'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1541
+#: nova/virt/xenapi/vm_utils.py:1550
msgid "Cannot find SR of content-type ISO"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1549
+#: nova/virt/xenapi/vm_utils.py:1558
#, python-format
msgid "ISO: looking at SR %(sr_rec)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1551
+#: nova/virt/xenapi/vm_utils.py:1560
msgid "ISO: not iso content"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1554
+#: nova/virt/xenapi/vm_utils.py:1563
msgid "ISO: iso content_type, no 'i18n-key' key"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1557
+#: nova/virt/xenapi/vm_utils.py:1566
msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1561
+#: nova/virt/xenapi/vm_utils.py:1570
msgid "ISO: SR MATCHing our criteria"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1563
+#: nova/virt/xenapi/vm_utils.py:1572
msgid "ISO: ISO, looking to see if it is host local"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1566
+#: nova/virt/xenapi/vm_utils.py:1575
#, python-format
msgid "ISO: PBD %(pbd_ref)s disappeared"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1569
+#: nova/virt/xenapi/vm_utils.py:1578
#, python-format
msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1572
+#: nova/virt/xenapi/vm_utils.py:1581
msgid "ISO: SR with local PBD"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1594
+#: nova/virt/xenapi/vm_utils.py:1603
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
"%(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1610
+#: nova/virt/xenapi/vm_utils.py:1619
#, python-format
msgid "Unable to obtain RRD XML updates with server details: %(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1664
+#: nova/virt/xenapi/vm_utils.py:1673
#, python-format
msgid "Invalid statistics data from Xenserver: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1724
+#: nova/virt/xenapi/vm_utils.py:1733
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_uuid)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1811
+#: nova/virt/xenapi/vm_utils.py:1820
#, python-format
msgid ""
"Parent %(parent_uuid)s doesn't match original parent "
"%(original_parent_uuid)s, waiting for coalesce..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1821
+#: nova/virt/xenapi/vm_utils.py:1830
#, python-format
msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1856
+#: nova/virt/xenapi/vm_utils.py:1865
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1876
+#: nova/virt/xenapi/vm_utils.py:1885
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1889
+#: nova/virt/xenapi/vm_utils.py:1898
#, python-format
msgid "Plugging VBD %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1892
+#: nova/virt/xenapi/vm_utils.py:1901
#, python-format
msgid "Plugging VBD %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1894
+#: nova/virt/xenapi/vm_utils.py:1903
#, python-format
msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1897
+#: nova/virt/xenapi/vm_utils.py:1906
#, python-format
msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1902
+#: nova/virt/xenapi/vm_utils.py:1911
#, python-format
msgid "Destroying VBD for VDI %s ... "
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1910
+#: nova/virt/xenapi/vm_utils.py:1919
#, python-format
msgid "Destroying VBD for VDI %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1923
+#: nova/virt/xenapi/vm_utils.py:1932
#, python-format
msgid "Running pygrub against %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1931
+#: nova/virt/xenapi/vm_utils.py:1940
#, python-format
msgid "Found Xen kernel %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1933
+#: nova/virt/xenapi/vm_utils.py:1942
msgid "No Xen kernel found. Booting HVM."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1935
+#: nova/virt/xenapi/vm_utils.py:1944
msgid ""
"Error while executing pygrub! Please, ensure the binary is installed "
"correctly, and available in your PATH; on some Linux distros, pygrub may "
@@ -8586,457 +9170,400 @@ msgid ""
"mode."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1952
+#: nova/virt/xenapi/vm_utils.py:1961
msgid "Partitions:"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1958
+#: nova/virt/xenapi/vm_utils.py:1967
#, python-format
msgid " %(num)s: %(ptype)s %(size)d sectors"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1983
+#: nova/virt/xenapi/vm_utils.py:1992
#, python-format
msgid ""
"Writing partition table %(primary_first)d %(primary_last)d to "
"%(dev_path)s..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1996
+#: nova/virt/xenapi/vm_utils.py:2005
#, python-format
msgid "Writing partition table %s done."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2050
+#: nova/virt/xenapi/vm_utils.py:2059
#, python-format
msgid ""
"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
"virtual_size=%(virtual_size)d block_size=%(block_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2082
+#: nova/virt/xenapi/vm_utils.py:2091
#, python-format
msgid ""
"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% "
"reduction in size"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2134
+#: nova/virt/xenapi/vm_utils.py:2143
msgid "Manipulating interface files directly"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2143
+#: nova/virt/xenapi/vm_utils.py:2152
#, python-format
msgid "Failed to mount filesystem (expected for non-linux instances): %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2255
+#: nova/virt/xenapi/vm_utils.py:2264
msgid "This domU must be running on the host specified by xenapi_connection_url"
msgstr ""
-#: nova/virt/xenapi/vmops.py:126 nova/virt/xenapi/vmops.py:692
+#: nova/virt/xenapi/vmops.py:133 nova/virt/xenapi/vmops.py:740
#, python-format
msgid "Updating progress to %(progress)d"
msgstr ""
#: nova/virt/xenapi/vmops.py:168
+#, python-format
+msgid "Importing image upload handler: %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:181
msgid "Error: Agent is disabled"
msgstr ""
-#: nova/virt/xenapi/vmops.py:254
+#: nova/virt/xenapi/vmops.py:267
msgid "Starting instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:323
+#: nova/virt/xenapi/vmops.py:336
msgid "Removing kernel/ramdisk files from dom0"
msgstr ""
-#: nova/virt/xenapi/vmops.py:395
+#: nova/virt/xenapi/vmops.py:409
#, python-format
msgid "Block device information present: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:426
+#: nova/virt/xenapi/vmops.py:440
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:499
+#: nova/virt/xenapi/vmops.py:519
msgid "Detected ISO image type, creating blank VM for install"
msgstr ""
-#: nova/virt/xenapi/vmops.py:516
+#: nova/virt/xenapi/vmops.py:536
msgid "Auto configuring disk, attempting to resize partition..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:542
+#: nova/virt/xenapi/vmops.py:582
msgid "Starting VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:548
+#: nova/virt/xenapi/vmops.py:588
msgid "Waiting for instance state to become running"
msgstr ""
-#: nova/virt/xenapi/vmops.py:562
+#: nova/virt/xenapi/vmops.py:602
#, python-format
msgid ""
"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:565
+#: nova/virt/xenapi/vmops.py:605
#, python-format
msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:576
+#: nova/virt/xenapi/vmops.py:616
#, python-format
msgid "Instance agent version: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:603
+#: nova/virt/xenapi/vmops.py:643
msgid "Setting VCPU weight"
msgstr ""
-#: nova/virt/xenapi/vmops.py:611
+#: nova/virt/xenapi/vmops.py:651
#, python-format
msgid "Could not find VM with name %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:661
+#: nova/virt/xenapi/vmops.py:709
msgid "Finished snapshot and upload for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:665
+#: nova/virt/xenapi/vmops.py:713
#, python-format
msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
msgstr ""
-#: nova/virt/xenapi/vmops.py:673
+#: nova/virt/xenapi/vmops.py:721
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vmops.py:710
+#: nova/virt/xenapi/vmops.py:758
#, python-format
msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:716 nova/virt/xenapi/vmops.py:766
+#: nova/virt/xenapi/vmops.py:764 nova/virt/xenapi/vmops.py:814
msgid "Clean shutdown did not complete successfully, trying hard shutdown."
msgstr ""
-#: nova/virt/xenapi/vmops.py:795
+#: nova/virt/xenapi/vmops.py:843
msgid "Resize down not allowed without auto_disk_config"
msgstr ""
-#: nova/virt/xenapi/vmops.py:840
+#: nova/virt/xenapi/vmops.py:888
#, python-format
msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
msgstr ""
-#: nova/virt/xenapi/vmops.py:845
+#: nova/virt/xenapi/vmops.py:893
msgid "Resize complete"
msgstr ""
-#: nova/virt/xenapi/vmops.py:889
+#: nova/virt/xenapi/vmops.py:937
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:980
+#: nova/virt/xenapi/vmops.py:1028
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1006
+#: nova/virt/xenapi/vmops.py:1032
msgid "Destroying VDIs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1033
+#: nova/virt/xenapi/vmops.py:1059
msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1040
+#: nova/virt/xenapi/vmops.py:1066
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1047
+#: nova/virt/xenapi/vmops.py:1073
msgid "kernel/ramdisk files removed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1073
+#: nova/virt/xenapi/vmops.py:1100
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1099
+#: nova/virt/xenapi/vmops.py:1129
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1150
+#: nova/virt/xenapi/vmops.py:1180
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1184
+#: nova/virt/xenapi/vmops.py:1214
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1233
-#, python-format
-msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
-msgstr ""
-
-#: nova/virt/xenapi/vmops.py:1237
+#: nova/virt/xenapi/vmops.py:1267
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1297
+#: nova/virt/xenapi/vmops.py:1327
msgid "Fetching VM ref while BUILDING failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1380
+#: nova/virt/xenapi/vmops.py:1410
msgid "Injecting network info to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1399
+#: nova/virt/xenapi/vmops.py:1429
msgid "Creating vifs"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1408
+#: nova/virt/xenapi/vmops.py:1438
#, python-format
msgid "Creating VIF for network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1411
+#: nova/virt/xenapi/vmops.py:1441
#, python-format
msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1439
+#: nova/virt/xenapi/vmops.py:1469
msgid "Injecting hostname to xenstore"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1535
+#: nova/virt/xenapi/vmops.py:1565
#, python-format
msgid ""
"Destination host:%(hostname)s must be in the same aggregate as the source"
" server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1567
+#: nova/virt/xenapi/vmops.py:1597
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1615
+#: nova/virt/xenapi/vmops.py:1645
msgid "VM.assert_can_migratefailed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1651
+#: nova/virt/xenapi/vmops.py:1681
msgid "Migrate Send failed"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:41
+#: nova/virt/xenapi/volume_utils.py:42
msgid "creating sr within volume_utils"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:44 nova/virt/xenapi/volume_utils.py:73
+#: nova/virt/xenapi/volume_utils.py:45 nova/virt/xenapi/volume_utils.py:74
#, python-format
msgid "type is = %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:47 nova/virt/xenapi/volume_utils.py:76
+#: nova/virt/xenapi/volume_utils.py:48 nova/virt/xenapi/volume_utils.py:77
#, python-format
msgid "name = %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:60
+#: nova/virt/xenapi/volume_utils.py:61
#, python-format
msgid "Created %(label)s as %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:65 nova/virt/xenapi/volume_utils.py:164
+#: nova/virt/xenapi/volume_utils.py:66 nova/virt/xenapi/volume_utils.py:165
msgid "Unable to create Storage Repository"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:69
+#: nova/virt/xenapi/volume_utils.py:70
msgid "introducing sr within volume_utils"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:93 nova/virt/xenapi/volume_utils.py:160
-#: nova/virt/xenapi/volumeops.py:141
+#: nova/virt/xenapi/volume_utils.py:94 nova/virt/xenapi/volume_utils.py:161
+#: nova/virt/xenapi/volumeops.py:72
#, python-format
msgid "Introduced %(label)s as %(sr_ref)s."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:96
+#: nova/virt/xenapi/volume_utils.py:97
msgid "Creating pbd for SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:98
+#: nova/virt/xenapi/volume_utils.py:99
msgid "Plugging SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:106 nova/virt/xenapi/volumeops.py:145
+#: nova/virt/xenapi/volume_utils.py:107 nova/virt/xenapi/volumeops.py:76
msgid "Unable to introduce Storage Repository"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:117 nova/virt/xenapi/volumeops.py:46
+#: nova/virt/xenapi/volume_utils.py:118
msgid "Unable to get SR using uuid"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:119
+#: nova/virt/xenapi/volume_utils.py:120
#, python-format
msgid "Forgetting SR %s..."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:127
+#: nova/virt/xenapi/volume_utils.py:128
msgid "Unable to forget Storage Repository"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:147
+#: nova/virt/xenapi/volume_utils.py:148
#, python-format
msgid "Introducing %s..."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:176
+#: nova/virt/xenapi/volume_utils.py:177
#, python-format
msgid "Unable to find SR from VBD %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:194
+#: nova/virt/xenapi/volume_utils.py:195
#, python-format
msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:200
+#: nova/virt/xenapi/volume_utils.py:201
#, python-format
msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:224
+#: nova/virt/xenapi/volume_utils.py:225
#, python-format
msgid "Unable to introduce VDI on SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:232
+#: nova/virt/xenapi/volume_utils.py:233
#, python-format
msgid "Unable to get record of VDI %s on"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:254
+#: nova/virt/xenapi/volume_utils.py:255
#, python-format
msgid "Unable to introduce VDI for SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:264
+#: nova/virt/xenapi/volume_utils.py:265
#, python-format
msgid "Error finding vdis in SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:271
+#: nova/virt/xenapi/volume_utils.py:272
#, python-format
msgid "Unable to find vbd for vdi %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:282
+#: nova/virt/xenapi/volume_utils.py:283
#, python-format
msgid "Unable to obtain target information %(mountpoint)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:321
+#: nova/virt/xenapi/volume_utils.py:322
#, python-format
msgid "Unable to obtain target information %(connection_data)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:347
-#, python-format
-msgid "Mountpoint cannot be translated: %s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:62
-msgid "Could not find VDI ref"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:67
-#, python-format
-msgid "Creating SR %s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:70
-msgid "Could not create SR"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:73
-msgid "Could not retrieve SR record"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:78
+#: nova/virt/xenapi/volume_utils.py:388
#, python-format
msgid "Introducing SR %s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:81
+#: nova/virt/xenapi/volume_utils.py:391
msgid "SR found in xapi database. No need to introduce"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:86
+#: nova/virt/xenapi/volume_utils.py:396
msgid "Could not introduce SR"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:90
-#, python-format
-msgid "Checking for SR %s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:100
+#: nova/virt/xenapi/volume_utils.py:403
#, python-format
msgid "SR %s not found in the xapi database"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:106
+#: nova/virt/xenapi/volume_utils.py:409
msgid "Could not forget SR"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:128
-#, python-format
-msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:163
+#: nova/virt/xenapi/volumeops.py:94
#, python-format
msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:173
+#: nova/virt/xenapi/volumeops.py:104
#, python-format
msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/volumeops.py:191
+#: nova/virt/xenapi/imageupload/glance.py:34
#, python-format
-msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:199
-#, python-format
-msgid "Unable to locate volume %s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:208
-#, python-format
-msgid "Unable to detach volume %s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:213
-#, python-format
-msgid "Unable to destroy vbd %s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:220
-#, python-format
-msgid "Error purging SR %s"
-msgstr ""
-
-#: nova/virt/xenapi/volumeops.py:222
-#, python-format
-msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
+msgid "Asking xapi to upload to glance %(vdi_uuids)s as ID %(image_id)s"
msgstr ""
#: nova/vnc/xvp_proxy.py:95 nova/vnc/xvp_proxy.py:100
@@ -9074,7 +9601,7 @@ msgstr ""
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr ""
-#: nova/volume/cinder.py:70
+#: nova/volume/cinder.py:90
#, python-format
msgid "Cinderclient connection created using URL: %s"
msgstr ""
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova.po b/nova/locale/tr_TR/LC_MESSAGES/nova.po
new file mode 100644
index 000000000..519a37857
--- /dev/null
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova.po
@@ -0,0 +1,9859 @@
+# Turkish (Turkey) translations for nova.
+# Copyright (C) 2012 ORGANIZATION
+# This file is distributed under the same license as the nova project.
+#
+# Translators:
+# Özcan Zafer AYAN <ozcanzaferayan@gmail.com>, 2013.
+msgid ""
+msgstr ""
+"Project-Id-Version: Nova\n"
+"Report-Msgid-Bugs-To: https://bugs.launchpad.net/nova\n"
+"POT-Creation-Date: 2012-10-29 00:01+0000\n"
+"PO-Revision-Date: 2013-02-02 18:03+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language-Team: en_US <LL@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 0.9.6\n"
+"Language: tr_TR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+
+#: nova/context.py:62
+#, python-format
+msgid "Arguments dropped when creating context: %s"
+msgstr ""
+
+#: nova/context.py:100
+#, python-format
+msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
+msgstr ""
+
+#: nova/crypto.py:47
+msgid "Filename of root CA"
+msgstr "Kök sertifika yetkilisinin dosya adı"
+
+#: nova/crypto.py:50
+msgid "Filename of private key"
+msgstr "Özel anahtarın dosya adı"
+
+#: nova/crypto.py:53
+msgid "Filename of root Certificate Revocation List"
+msgstr "Kök sertifika iptali listesinin dosya adı"
+
+#: nova/crypto.py:56
+msgid "Where we keep our keys"
+msgstr "Anahtarlarımızı tuttuğumuz yer"
+
+#: nova/crypto.py:59
+msgid "Where we keep our root CA"
+msgstr "Kök sertifika yetkilisini tutttuğumuz yer"
+
+#: nova/crypto.py:62
+msgid "Should we use a CA for each project?"
+msgstr "Her proje için sertifika yetkilisini kullanmalı mıyız?"
+
+#: nova/crypto.py:66
+#, python-format
+msgid "Subject for certificate for users, %s for project, user, timestamp"
+msgstr "Kullanıcıların sertifika konusu, proje, kullanıcı ve tarih bilgisi için %s"
+
+#: nova/crypto.py:71
+#, python-format
+msgid "Subject for certificate for projects, %s for project, timestamp"
+msgstr "Projelerin sertifika konusu, proje ve tarih bilgisi için %s"
+
+#: nova/crypto.py:301
+#, python-format
+msgid "Flags path: %s"
+msgstr "Bayrakların yolu: %s"
+
+#: nova/exception.py:56
+msgid "Unexpected error while running command."
+msgstr "Komut çalışırken beklenmeyen bir hata oluştu."
+
+#: nova/exception.py:59
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr "%(description)s⏎ Komut: %(cmd)s⏎ Çıkış kodu: %(exit_code)s⏎ Standart çıktı: %(stdout)r⏎ Standart hata: %(stderr)r"
+
+#: nova/exception.py:72
+msgid "DB exception wrapped."
+msgstr "Veritabanı istisnası yakalandı."
+
+#: nova/exception.py:131
+msgid "An unknown exception occurred."
+msgstr "Bilinmeyen bir istisna oluştu."
+
+#: nova/exception.py:152 nova/openstack/common/rpc/common.py:46
+msgid "Exception in string format operation"
+msgstr ""
+
+#: nova/exception.py:162
+msgid "Unknown"
+msgstr ""
+
+#: nova/exception.py:179
+#, python-format
+msgid "Fatal call to deprecated config %(msg)s"
+msgstr ""
+
+#: nova/exception.py:183
+msgid "Failed to decrypt text"
+msgstr ""
+
+#: nova/exception.py:187
+msgid "Virtual Interface creation failed"
+msgstr "Sanal arayüz oluşturma hatası"
+
+#: nova/exception.py:191
+msgid "5 attempts to create virtual interfacewith unique mac address failed"
+msgstr "Hata. Eşsiz mac adresiyle sanal arayüz oluşturmada 5 kez girişim yapıldı."
+
+#: nova/exception.py:196
+#, python-format
+msgid "Connection to glance host %(host)s:%(port)s failed: %(reason)s"
+msgstr ""
+
+#: nova/exception.py:201
+msgid "Not authorized."
+msgstr "Yetkiniz yok."
+
+#: nova/exception.py:206
+msgid "User does not have admin privileges"
+msgstr "Kullanıcı yönetici ayrıcalıklarına sahip değil"
+
+#: nova/exception.py:210
+#, python-format
+msgid "Policy doesn't allow %(action)s to be performed."
+msgstr "%(action)s uygulanmasına izin verilmiyor."
+
+#: nova/exception.py:214
+#, python-format
+msgid "Image %(image_id)s is not active."
+msgstr ""
+
+#: nova/exception.py:218
+#, python-format
+msgid "Not authorized for image %(image_id)s."
+msgstr ""
+
+#: nova/exception.py:222
+msgid "Unacceptable parameters."
+msgstr "Kabul edilemez parametreler var."
+
+#: nova/exception.py:227
+msgid "Invalid snapshot"
+msgstr ""
+
+#: nova/exception.py:231
+#, python-format
+msgid "Volume %(volume_id)s is not attached to anything"
+msgstr "%(volume_id)s bölümü hiçbir şeyle ilişkilendirilmedi"
+
+#: nova/exception.py:235
+#, python-format
+msgid "Volume %(volume_id)s is still attached, detach volume first."
+msgstr ""
+
+#: nova/exception.py:239 nova/api/ec2/cloud.py:389 nova/api/ec2/cloud.py:414
+#: nova/api/openstack/compute/contrib/keypairs.py:98 nova/compute/api.py:2194
+msgid "Keypair data is invalid"
+msgstr "Anahtar çifti verisi geçersiz"
+
+#: nova/exception.py:243
+msgid "Failed to load data into json format"
+msgstr ""
+
+#: nova/exception.py:247
+msgid "The request is invalid."
+msgstr "İstek geçersiz"
+
+#: nova/exception.py:251
+msgid "Invalid input received"
+msgstr "Geçersiz giriş yapıldı"
+
+#: nova/exception.py:255
+msgid "Invalid volume type"
+msgstr ""
+
+#: nova/exception.py:259
+msgid "Invalid volume"
+msgstr "Geçersiz bölüm"
+
+#: nova/exception.py:263 nova/api/openstack/compute/servers.py:1283
+#: nova/api/openstack/compute/contrib/admin_actions.py:242
+msgid "Invalid metadata"
+msgstr "Geçersiz metadata"
+
+#: nova/exception.py:267
+msgid "Invalid metadata size"
+msgstr ""
+
+#: nova/exception.py:271
+#, python-format
+msgid "Invalid port range %(from_port)s:%(to_port)s. %(msg)s"
+msgstr "Geçersiz port aralığı %(from_port)s:%(to_port)s. %(msg)s"
+
+#: nova/exception.py:275 nova/api/ec2/cloud.py:571
+#, python-format
+msgid "Invalid IP protocol %(protocol)s."
+msgstr "Geçersiz IP %(protocol)s."
+
+#: nova/exception.py:279
+#, python-format
+msgid "Invalid content type %(content_type)s."
+msgstr "Geçersiz içerik türü %(content_type)s."
+
+#: nova/exception.py:283
+#, python-format
+msgid "Invalid cidr %(cidr)s."
+msgstr "Geçersiz CIDR %(cidr)s."
+
+#: nova/exception.py:287
+msgid "Invalid Parameter: Unicode is not supported by the current database."
+msgstr ""
+
+#: nova/exception.py:294
+#, python-format
+msgid "%(err)s"
+msgstr "Hatalar: %(err)s"
+
+#: nova/exception.py:298
+#, python-format
+msgid ""
+"Cannot perform action '%(action)s' on aggregate %(aggregate_id)s. Reason: "
+"%(reason)s."
+msgstr "%(aggregate_id)s üzerinde '%(action)s' eylemi gerçekleştirilemiyor. Nedeni: %(reason)s."
+
+#: nova/exception.py:303
+#, python-format
+msgid "Group not valid. Reason: %(reason)s"
+msgstr ""
+
+#: nova/exception.py:307
+msgid "Sort key supplied was not valid."
+msgstr ""
+
+#: nova/exception.py:311
+#, python-format
+msgid ""
+"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while "
+"the instance is in this state."
+msgstr "%(attr)s %(state)s 'deki %(instance_uuid)s örneği. Örnek bu durumda iken %(method)s yapılamaz."
+
+#: nova/exception.py:316
+#, python-format
+msgid "Instance %(instance_id)s is not running."
+msgstr "%(instance_id)s örneği çalışmıyor."
+
+#: nova/exception.py:320
+#, python-format
+msgid "Instance %(instance_id)s is not in rescue mode"
+msgstr "%(instance_id)s örneği kurtarma modunda değil"
+
+#: nova/exception.py:324
+#, python-format
+msgid "Instance %(instance_id)s is not ready"
+msgstr ""
+
+#: nova/exception.py:328
+msgid "Failed to suspend instance"
+msgstr "Örnek kapatmada hata oluştu."
+
+#: nova/exception.py:332
+msgid "Failed to resume server"
+msgstr ""
+
+#: nova/exception.py:336
+msgid "Failed to reboot instance"
+msgstr "Tekrar yükleme örneğinde hata oluştu."
+
+#: nova/exception.py:340
+msgid "Failed to terminate instance"
+msgstr ""
+
+#: nova/exception.py:344
+msgid "Service is unavailable at this time."
+msgstr "Şu anda servis kullanılamıyor."
+
+#: nova/exception.py:348
+msgid "Insufficient compute resources."
+msgstr ""
+
+#: nova/exception.py:352
+msgid "Compute service is unavailable at this time."
+msgstr "Hesaplama servisi şu anda kullanılamıyor."
+
+#: nova/exception.py:356
+#, python-format
+msgid ""
+"Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
+msgstr "Mevcut (%(host)s) sunucusundan (%(instance_id)s) örneği geçirilemez."
+
+#: nova/exception.py:361
+msgid "The supplied hypervisor type of is invalid."
+msgstr "Desteklenen hypervisor türü geçersiz."
+
+#: nova/exception.py:365
+msgid ""
+"The instance requires a newer hypervisor version than has been provided."
+msgstr "Örnek şu ankinden daha yeni hypervisor versiyonu gerektirir."
+
+#: nova/exception.py:370
+#, python-format
+msgid ""
+"The supplied disk path (%(path)s) already exists, it is expected not to "
+"exist."
+msgstr "Desteklenen disk yolu (%(path)s) halen var,fakat var olmaması gerekir."
+
+#: nova/exception.py:375
+#, python-format
+msgid "The supplied device path (%(path)s) is invalid."
+msgstr "Desteklenen cihaz yolu (%(path)s) geçersiz."
+
+#: nova/exception.py:379
+#, python-format
+msgid "The supplied device path (%(path)s) is in use."
+msgstr ""
+
+#: nova/exception.py:383
+#, python-format
+msgid "The supplied device (%(device)s) is busy."
+msgstr ""
+
+#: nova/exception.py:387
+msgid "Unacceptable CPU info"
+msgstr "Kabul edilemeyen CPU bilgisi"
+
+#: nova/exception.py:391
+#, python-format
+msgid "%(address)s is not a valid IP v4/6 address."
+msgstr "%(address)s geçerli bir IP v4/6 adresi değildir."
+
+#: nova/exception.py:395
+#, python-format
+msgid ""
+"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN tag"
+" is %(tag)s, but the one associated with the port group is %(pgroup)s."
+msgstr "VLAN etiketi %(bridge)s port grubu için uygun değil. Beklenen VLAN etiketi %(tag)s'dir, fakat tek ilişkili port grubu %(pgroup)s'dir."
+
+#: nova/exception.py:401
+#, python-format
+msgid ""
+"vSwitch which contains the port group %(bridge)s is not associated with the "
+"desired physical adapter. Expected vSwitch is %(expected)s, but the one "
+"associated is %(actual)s."
+msgstr "%(bridge)s port grubunu içeren vSwitch istenilen fiziksel adaptörü ile ilişkilendirilmemiştir. Beklenen vSwitch %(expected)s'dir, fakat ilişkilendirilen ise %(actual)s'dir."
+
+#: nova/exception.py:408
+#, python-format
+msgid "Disk format %(disk_format)s is not acceptable"
+msgstr "%(disk_format)s disk formatı kabul edilemez."
+
+#: nova/exception.py:412
+#, python-format
+msgid "Image %(image_id)s is unacceptable: %(reason)s"
+msgstr "%(image_id)s imajı kabul edilemez: %(reason)s"
+
+#: nova/exception.py:416
+#, python-format
+msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
+msgstr "%(instance_id)s örneği kabul edilemez: %(reason)s"
+
+#: nova/exception.py:420
+#, python-format
+msgid "Ec2 id %(ec2_id)s is unacceptable."
+msgstr "%(ec2_id)s Ec2 id'si kabul edilemez."
+
+#: nova/exception.py:424
+#, python-format
+msgid "Expected a uuid but received %(uuid)s."
+msgstr ""
+
+#: nova/exception.py:428
+msgid "Constraint not met."
+msgstr ""
+
+#: nova/exception.py:433
+msgid "Resource could not be found."
+msgstr "Kaynak bulunamadı."
+
+#: nova/exception.py:438
+#, python-format
+msgid "Could not find driver for compute_driver %(name)s"
+msgstr ""
+
+#: nova/exception.py:442
+#, python-format
+msgid "Volume %(volume_id)s persistence file could not be found."
+msgstr ""
+
+#: nova/exception.py:446
+#, python-format
+msgid "Volume %(volume_id)s could not be found."
+msgstr "%(volume_id)s bölümü bulunamadı."
+
+#: nova/exception.py:450
+#, python-format
+msgid "Unable to locate account %(account_name)s on Solidfire device"
+msgstr ""
+
+#: nova/exception.py:455
+#, python-format
+msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: nova/exception.py:460
+#, python-format
+msgid "Volume type %(volume_type_id)s could not be found."
+msgstr ""
+
+#: nova/exception.py:464
+#, python-format
+msgid "Volume type with name %(volume_type_name)s could not be found."
+msgstr ""
+
+#: nova/exception.py:469
+#, python-format
+msgid ""
+"Volume Type %(volume_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr ""
+
+#: nova/exception.py:474
+#, python-format
+msgid "Snapshot %(snapshot_id)s could not be found."
+msgstr "%(snapshot_id)s sistem anlık görüntüsü bulunamadı."
+
+#: nova/exception.py:478
+#, python-format
+msgid "deleting volume %(volume_name)s that has snapshot"
+msgstr ""
+
+#: nova/exception.py:482
+#, python-format
+msgid "deleting snapshot %(snapshot_name)s that has dependent volumes"
+msgstr ""
+
+#: nova/exception.py:487
+#, python-format
+msgid "No target id found for volume %(volume_id)s."
+msgstr "%(volume_id)s bölümü için hedef id bulunamadı."
+
+#: nova/exception.py:491
+#, python-format
+msgid "Failed to create iscsi target for volume %(volume_id)s."
+msgstr ""
+
+#: nova/exception.py:495
+#, python-format
+msgid "Failed to remove iscsi target for volume %(volume_id)s."
+msgstr ""
+
+#: nova/exception.py:499
+#, python-format
+msgid "No disk at %(location)s"
+msgstr "%(location)s'da disk yok."
+
+#: nova/exception.py:503
+#, python-format
+msgid "Could not find a handler for %(driver_type)s volume."
+msgstr "%(driver_type)s bölümü için bir işleyici bulunamadı."
+
+#: nova/exception.py:507
+#, python-format
+msgid "Invalid image href %(image_href)s."
+msgstr "Geçersiz %(image_href)s imaj kaynak dosyası."
+
+#: nova/exception.py:511
+#, python-format
+msgid "Image %(image_id)s could not be found."
+msgstr "%(image_id)s imaj kaynak dosyası bulunamadı."
+
+#: nova/exception.py:515
+#, python-format
+msgid ""
+"Image %(image_id)s could not be found. The nova EC2 API assigns image ids "
+"dynamically when they are listed for the first time. Have you listed image "
+"ids since adding this image?"
+msgstr ""
+
+#: nova/exception.py:522
+#, python-format
+msgid "Project %(project_id)s could not be found."
+msgstr "%(project_id)s projesi bulunamadı."
+
+#: nova/exception.py:526
+msgid "Cannot find SR to read/write VDI."
+msgstr "VDI'ya okuma/yazma yapılırken SR(Saklama deposu) bulunamadı."
+
+#: nova/exception.py:530
+#, python-format
+msgid "Network %(network_id)s is still in use."
+msgstr ""
+
+#: nova/exception.py:534
+#, python-format
+msgid "%(req)s is required to create a network."
+msgstr "Ağ oluşturulurken %(req)s gereklidir."
+
+#: nova/exception.py:538
+#, python-format
+msgid "Network %(network_id)s could not be found."
+msgstr "%(network_id)s ağı bulunamadı."
+
+#: nova/exception.py:542
+#, python-format
+msgid "Network could not be found for bridge %(bridge)s"
+msgstr " %(bridge)s köprüsü için ağ bulunamadı."
+
+#: nova/exception.py:546
+#, python-format
+msgid "Network could not be found for uuid %(uuid)s"
+msgstr "%(uuid)s UUID'si için bir ağ bulunamadı"
+
+#: nova/exception.py:550
+#, python-format
+msgid "Network could not be found with cidr %(cidr)s."
+msgstr "%(cidr)s CIDR ile bir ağ bulunamadı."
+
+#: nova/exception.py:554
+#, python-format
+msgid "Network could not be found for instance %(instance_id)s."
+msgstr "%(instance_id)s örneği için ağ bulunamadı."
+
+#: nova/exception.py:558
+msgid "No networks defined."
+msgstr "Tanımlı bir ağ yok."
+
+#: nova/exception.py:562
+#, python-format
+msgid ""
+"Either Network uuid %(network_uuid)s is not present or is not assigned to "
+"the project %(project_id)s."
+msgstr "%(network_uuid)s ağ UUID verilmedi veya %(project_id)s projesi ile ilişkilendirilmedi."
+
+#: nova/exception.py:567
+#, python-format
+msgid "Host is not set to the network (%(network_id)s)."
+msgstr ""
+
+#: nova/exception.py:571
+msgid "Could not find the datastore reference(s) which the VM uses."
+msgstr "VM'nin kullandığı veri deposu referansı(ları) bulunamadı."
+
+#: nova/exception.py:575
+#, python-format
+msgid "Port %(port_id)s is still in use."
+msgstr ""
+
+#: nova/exception.py:579
+#, python-format
+msgid "Port %(port_id)s could not be found."
+msgstr ""
+
+#: nova/exception.py:583
+#, python-format
+msgid "No fixed IP associated with id %(id)s."
+msgstr "%(id)s ile ilişkilendirilmiş bir dinamik IP yok."
+
+#: nova/exception.py:587
+#, python-format
+msgid "Fixed ip not found for address %(address)s."
+msgstr "%(address)s adresi için bir dinamik IP bulunamadı."
+
+#: nova/exception.py:591
+#, python-format
+msgid "Instance %(instance_uuid)s has zero fixed ips."
+msgstr ""
+
+#: nova/exception.py:595
+#, python-format
+msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
+msgstr "%(network_id)s ağında %(host)s ağ sunucusunun hiç dinamik IP'si yok.\n "
+
+#: nova/exception.py:600
+#, python-format
+msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
+msgstr ""
+
+#: nova/exception.py:604
+#, python-format
+msgid ""
+"Fixed IP address (%(address)s) does not exist in network (%(network_uuid)s)."
+msgstr "(%(network_uuid)s) ağında bir (%(address)s) dinamik IP adresi yok."
+
+#: nova/exception.py:609
+#, python-format
+msgid ""
+"Fixed IP address %(address)s is already in use on instance "
+"%(instance_uuid)s."
+msgstr ""
+
+#: nova/exception.py:614
+#, python-format
+msgid ""
+"More than one instance is associated with fixed ip address '%(address)s'."
+msgstr ""
+
+#: nova/exception.py:619
+#, python-format
+msgid "Fixed IP address %(address)s is invalid."
+msgstr "%(address)s dinamik IP adresi geçersiz."
+
+#: nova/exception.py:623
+msgid "Zero fixed ips available."
+msgstr "Kullanılabilir hiç dinamik IP yok."
+
+#: nova/exception.py:627
+msgid "Zero fixed ips could be found."
+msgstr "Hiç dinamik IP bulunamadı."
+
+#: nova/exception.py:636
+#, python-format
+msgid "Floating ip %(address)s already exists."
+msgstr ""
+
+#: nova/exception.py:640
+#, python-format
+msgid "Floating ip not found for id %(id)s."
+msgstr "%(id)s ID'si için değişken IP bulunamadı."
+
+#: nova/exception.py:644
+#, python-format
+msgid "The DNS entry %(name)s already exists in domain %(domain)s."
+msgstr "%(domain)s alanında %(name)s DNS girdisi halen var."
+
+#: nova/exception.py:648
+#, python-format
+msgid "Floating ip not found for address %(address)s."
+msgstr "%(address)s adresi için bir değişken IP bulunamadı."
+
+#: nova/exception.py:652
+#, python-format
+msgid "Floating ip not found for host %(host)s."
+msgstr " %(host)s sunucusu için bir değişken IP bulunamadı."
+
+#: nova/exception.py:656
+#, python-format
+msgid "Multiple floating ips are found for address %(address)s."
+msgstr ""
+
+#: nova/exception.py:660
+msgid "Floating ip pool not found."
+msgstr ""
+
+#: nova/exception.py:665
+msgid "Zero floating ips available."
+msgstr "Hiç mevcut değişken IP yok."
+
+#: nova/exception.py:670
+#, python-format
+msgid "Floating ip %(address)s is associated."
+msgstr "%(address)s değişken IP adresi ilişkilendirildi."
+
+#: nova/exception.py:674
+#, python-format
+msgid "Floating ip %(address)s is not associated."
+msgstr "%(address)s değişken IP adresi ilişkilendirilemedi."
+
+#: nova/exception.py:678
+msgid "Zero floating ips exist."
+msgstr "Hiç değişken IP yok."
+
+#: nova/exception.py:682
+#, python-format
+msgid "Interface %(interface)s not found."
+msgstr "%(interface)s arayüzü bulunamadı."
+
+#: nova/exception.py:686
+msgid "Cannot disassociate auto assigined floating ip"
+msgstr ""
+
+#: nova/exception.py:690
+#, python-format
+msgid "Keypair %(name)s not found for user %(user_id)s"
+msgstr "%(user_id)s kullanıcısı için %(name)s anahtar çifti bulunamadı"
+
+#: nova/exception.py:694
+#, python-format
+msgid "Certificate %(certificate_id)s not found."
+msgstr "%(certificate_id)s sertifikası bulunamadı"
+
+#: nova/exception.py:698
+#, python-format
+msgid "Service %(service_id)s could not be found."
+msgstr "%(service_id)s servisi bulunamadı."
+
+#: nova/exception.py:702
+#, python-format
+msgid "Host %(host)s could not be found."
+msgstr "%(host)s sunucusu bulunamadı."
+
+#: nova/exception.py:706
+#, python-format
+msgid "Compute host %(host)s could not be found."
+msgstr "%(host)s hesaplama sunucusu bulunamadı."
+
+#: nova/exception.py:710
+#, python-format
+msgid "Could not find binary %(binary)s on host %(host)s."
+msgstr "%(host)s sunucusunda %(binary)s ikilisi bulunamadı."
+
+#: nova/exception.py:714
+#, python-format
+msgid "Invalid reservation expiration %(expire)s."
+msgstr ""
+
+#: nova/exception.py:718
+#, python-format
+msgid ""
+"Change would make usage less than 0 for the following resources: %(unders)s"
+msgstr ""
+
+#: nova/exception.py:723
+msgid "Quota could not be found"
+msgstr "Kota bulunamadı."
+
+#: nova/exception.py:727
+#, python-format
+msgid "Unknown quota resources %(unknown)s."
+msgstr ""
+
+#: nova/exception.py:731
+#, python-format
+msgid "Quota for project %(project_id)s could not be found."
+msgstr "%(project_id)s projesi için bir kota bulunamadı."
+
+#: nova/exception.py:735
+#, python-format
+msgid "Quota class %(class_name)s could not be found."
+msgstr ""
+
+#: nova/exception.py:739
+#, python-format
+msgid "Quota usage for project %(project_id)s could not be found."
+msgstr ""
+
+#: nova/exception.py:743
+#, python-format
+msgid "Quota reservation %(uuid)s could not be found."
+msgstr ""
+
+#: nova/exception.py:747
+#, python-format
+msgid "Quota exceeded for resources: %(overs)s"
+msgstr ""
+
+#: nova/exception.py:751
+#, python-format
+msgid "Security group %(security_group_id)s not found."
+msgstr "%(security_group_id)s güvenlik grubu bulunamadı."
+
+#: nova/exception.py:755
+#, python-format
+msgid ""
+"Security group %(security_group_id)s not found for project %(project_id)s."
+msgstr "%(project_id)s projesi için %(security_group_id)s güvenlik grubu bulunamadı."
+
+#: nova/exception.py:760
+#, python-format
+msgid "Security group with rule %(rule_id)s not found."
+msgstr "%(rule_id)s kurallı güvenlik grubu bulunamadı."
+
+#: nova/exception.py:764
+#, python-format
+msgid ""
+"Security group %(security_group_id)s is already associated with the instance"
+" %(instance_id)s"
+msgstr "%(security_group_id)s güvenlik grubu zaten %(instance_id)s örneği ile ilişkilendirimiş."
+
+#: nova/exception.py:769
+#, python-format
+msgid ""
+"Security group %(security_group_id)s is not associated with the instance "
+"%(instance_id)s"
+msgstr "%(security_group_id)s güvenlik grubu %(instance_id)s örneği ile ilişkilendirilmedi."
+
+#: nova/exception.py:774
+#, python-format
+msgid "Migration %(migration_id)s could not be found."
+msgstr "%(migration_id)s göçü bulunamadı."
+
+#: nova/exception.py:778
+#, python-format
+msgid ""
+"Migration not found for instance %(instance_id)s with status %(status)s."
+msgstr "%(status)s durumuyla %(instance_id)s örneği için göç bulunamadı."
+
+#: nova/exception.py:783
+#, python-format
+msgid "Console pool %(pool_id)s could not be found."
+msgstr "%(pool_id)s konsol havuzu bulunamadı."
+
+#: nova/exception.py:787
+#, python-format
+msgid ""
+"Console pool of type %(console_type)s for compute host %(compute_host)s on "
+"proxy host %(host)s not found."
+msgstr "%(host)s roxy sunucusundaki %(compute_host)s hesaplama sunucusu için %(console_type)s türünün konsol havuzu bulunamadı."
+
+#: nova/exception.py:793
+#, python-format
+msgid "Console %(console_id)s could not be found."
+msgstr "%(console_id)s konsolu bulunamadı."
+
+#: nova/exception.py:797
+#, python-format
+msgid "Console for instance %(instance_uuid)s could not be found."
+msgstr ""
+
+#: nova/exception.py:801
+#, python-format
+msgid ""
+"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
+"found."
+msgstr ""
+
+#: nova/exception.py:806
+#, python-format
+msgid "Invalid console type %(console_type)s "
+msgstr ""
+
+#: nova/exception.py:810
+#, python-format
+msgid "Instance type %(instance_type_id)s could not be found."
+msgstr "%(instance_type_id)s örnek türü bulunamadı"
+
+#: nova/exception.py:814
+#, python-format
+msgid "Instance type with name %(instance_type_name)s could not be found."
+msgstr "%(instance_type_name)s isimli örnek türü bulunamadı."
+
+#: nova/exception.py:819
+#, python-format
+msgid "Flavor %(flavor_id)s could not be found."
+msgstr "%(flavor_id)s örnek türü bulunamadı."
+
+#: nova/exception.py:823
+#, python-format
+msgid "Flavor access not found for %(flavor_id) / %(project_id) combination."
+msgstr ""
+
+#: nova/exception.py:828
+#, python-format
+msgid "Scheduler Host Filter %(filter_name)s could not be found."
+msgstr "%(filter_name)s zamanlayıcı sunucu filtresi bulunamadı."
+
+#: nova/exception.py:832
+#, python-format
+msgid "Scheduler cost function %(cost_fn_str)s could not be found."
+msgstr "%(cost_fn_str)s zamanlayıcı maliyet fonksiyonu bulunamadı."
+
+#: nova/exception.py:837
+#, python-format
+msgid "Scheduler weight flag not found: %(flag_name)s"
+msgstr "Zamanlayıcı yük bayrağı bulunamadı: %(flag_name)s"
+
+#: nova/exception.py:841
+#, python-format
+msgid "Instance %(instance_uuid)s has no metadata with key %(metadata_key)s."
+msgstr ""
+
+#: nova/exception.py:846
+#, python-format
+msgid ""
+"Instance %(instance_uuid)s has no system metadata with key %(metadata_key)s."
+msgstr ""
+
+#: nova/exception.py:851
+#, python-format
+msgid ""
+"Instance Type %(instance_type_id)s has no extra specs with key "
+"%(extra_specs_key)s."
+msgstr "%(instance_type_id)s örnek türü %(extra_specs_key)s anahtarı ile ekstra özelliklere sahip değil."
+
+#: nova/exception.py:856
+#, python-format
+msgid "File %(file_path)s could not be found."
+msgstr "%(file_path)s dosyası bulunamadı."
+
+#: nova/exception.py:860
+msgid "Zero files could be found."
+msgstr "Hiç dosya bulunamadı."
+
+#: nova/exception.py:864
+#, python-format
+msgid ""
+"Virtual switch associated with the network adapter %(adapter)s not found."
+msgstr "%(adapter)s ağ bağdaştırıcısı ile ilişkilendirilen sanal anahtar bulunamadı."
+
+#: nova/exception.py:869
+#, python-format
+msgid "Network adapter %(adapter)s could not be found."
+msgstr "%(adapter)s ağ bağdaştırıcısı bulunamadı."
+
+#: nova/exception.py:873
+#, python-format
+msgid "Class %(class_name)s could not be found: %(exception)s"
+msgstr "%(class_name)s sınıfı bulunamadı: %(exception)s"
+
+#: nova/exception.py:877
+msgid "Action not allowed."
+msgstr "İşlem için izin verilmiyor."
+
+#: nova/exception.py:881
+msgid "Rotation is not allowed for snapshots"
+msgstr "Önceki sistem geri görüntüsüne dönmek için izin verilmiyor."
+
+#: nova/exception.py:885
+msgid "Rotation param is required for backup image_type"
+msgstr "Yedek imajı geri dönüş parametresi gerekli"
+
+#: nova/exception.py:889
+#, python-format
+msgid "Key pair %(key_name)s already exists."
+msgstr "%(key_name)s anahtar çifti zaten var."
+
+#: nova/exception.py:893
+#, python-format
+msgid "Instance %(name)s already exists."
+msgstr "%(name)s örneği zaten var."
+
+#: nova/exception.py:897
+#, python-format
+msgid "Instance Type with name %(name)s already exists."
+msgstr ""
+
+#: nova/exception.py:901
+#, python-format
+msgid "Instance Type with ID %(flavor_id)s already exists."
+msgstr ""
+
+#: nova/exception.py:905
+#, python-format
+msgid ""
+"Flavor access alreay exists for flavor %(flavor_id)s and project "
+"%(project_id)s combination."
+msgstr ""
+
+#: nova/exception.py:910
+#, python-format
+msgid "Volume Type %(name)s already exists."
+msgstr ""
+
+#: nova/exception.py:914
+#, python-format
+msgid "%(path)s is not on shared storage: %(reason)s"
+msgstr ""
+
+#: nova/exception.py:918
+#, python-format
+msgid "%(path)s is not on local storage: %(reason)s"
+msgstr ""
+
+#: nova/exception.py:922
+msgid "Migration error"
+msgstr "Geçiş hatası."
+
+#: nova/exception.py:926
+#, python-format
+msgid "Malformed message body: %(reason)s"
+msgstr "Hatalı biçimlendirilmiş mesaj gövdesi: %(reason)s"
+
+#: nova/exception.py:932
+#, python-format
+msgid "Could not find config at %(path)s"
+msgstr "%(path)s'deki yapılandırma bulunamadı"
+
+#: nova/exception.py:936
+#, python-format
+msgid "Could not load paste app '%(name)s' from %(path)s"
+msgstr ""
+
+#: nova/exception.py:940
+msgid "When resizing, instances must change flavor!"
+msgstr ""
+
+#: nova/exception.py:944
+msgid "Image is larger than instance type allows"
+msgstr "İmaj dosyası verilen örneğin müsade ettiği türden daha büyük"
+
+#: nova/exception.py:948
+msgid "Instance type's memory is too small for requested image."
+msgstr "Örnek türün belleği istenilen imaj dosyası için çok küçük."
+
+#: nova/exception.py:952
+msgid "Instance type's disk is too small for requested image."
+msgstr "Örnek türünün diski istenilen imaj dosyası için çok küçük."
+
+#: nova/exception.py:956
+#, python-format
+msgid "Insufficient free memory on compute node to start %(uuid)s."
+msgstr "%(uuid)s hesaplama düğümü başlatmada yetersiz boş hafıza."
+
+#: nova/exception.py:960
+msgid "Could not fetch bandwidth/cpu/disk metrics for this host."
+msgstr "Bu sunucu için bant genişliği/cpu/disk ölçümleri alınamadı."
+
+#: nova/exception.py:964
+#, python-format
+msgid "No valid host was found. %(reason)s"
+msgstr "Geçerli bir sunucu bulunamadı: %(reason)s"
+
+#: nova/exception.py:968
+#, python-format
+msgid "Host %(host)s is not up or doesn't exist."
+msgstr ""
+
+#: nova/exception.py:972
+msgid "Quota exceeded"
+msgstr "Kota aşıldı"
+
+#: nova/exception.py:979
+#, python-format
+msgid ""
+"Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)d "
+"of %(allowed)d %(resource)s"
+msgstr ""
+
+#: nova/exception.py:984
+msgid "Maximum volume size exceeded"
+msgstr ""
+
+#: nova/exception.py:988
+#, python-format
+msgid "Maximum number of volumes allowed (%(allowed)d) exceeded"
+msgstr ""
+
+#: nova/exception.py:992
+msgid "Maximum number of floating ips exceeded"
+msgstr ""
+
+#: nova/exception.py:996
+#, python-format
+msgid "Maximum number of metadata items exceeds %(allowed)d"
+msgstr ""
+
+#: nova/exception.py:1000
+msgid "Personality file limit exceeded"
+msgstr "Kişisel dosya limiti aşıldı"
+
+#: nova/exception.py:1004
+msgid "Personality file path too long"
+msgstr "Kişisel dosya yolu çok uzun"
+
+#: nova/exception.py:1008
+msgid "Personality file content too long"
+msgstr "Kişisel dosya içeriği çok uzun"
+
+#: nova/exception.py:1012
+msgid "Maximum number of key pairs exceeded"
+msgstr ""
+
+#: nova/exception.py:1016
+msgid "Maximum number of security groups or rules exceeded"
+msgstr ""
+
+#: nova/exception.py:1020
+#, python-format
+msgid ""
+"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
+msgstr ""
+
+#: nova/exception.py:1025
+#, python-format
+msgid "Aggregate %(aggregate_id)s could not be found."
+msgstr "%(aggregate_id)s kümesi bulunamadı."
+
+#: nova/exception.py:1029
+#, python-format
+msgid "Aggregate %(aggregate_name)s already exists."
+msgstr "%(aggregate_name)s kümesi zaten var."
+
+#: nova/exception.py:1033
+#, python-format
+msgid "Aggregate %(aggregate_id)s has no host %(host)s."
+msgstr "%(aggregate_id)s kümesi %(host)s sunucusuna sahip değil."
+
+#: nova/exception.py:1037
+#, python-format
+msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
+msgstr "%(aggregate_id)s kümesi %(metadata_key)s. anahtarı ile hiç metadata'sı yok."
+
+#: nova/exception.py:1042
+#, python-format
+msgid "Aggregate %(aggregate_id)s already has host %(host)s."
+msgstr "%(aggregate_id)s kümesi zaten%(host)s sunucusuna sahip."
+
+#: nova/exception.py:1046
+#, python-format
+msgid "Detected more than one volume with name %(vol_name)s"
+msgstr ""
+
+#: nova/exception.py:1050
+#, python-format
+msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s"
+msgstr ""
+
+#: nova/exception.py:1055
+#, python-format
+msgid ""
+"Bad or unexpected response from the storage volume backend API: %(data)s"
+msgstr ""
+
+#: nova/exception.py:1060
+msgid "Unknown NFS exception"
+msgstr ""
+
+#: nova/exception.py:1064
+msgid "No mounted NFS shares found"
+msgstr ""
+
+#: nova/exception.py:1068
+#, python-format
+msgid "There is no share which can host %(volume_size)sG"
+msgstr ""
+
+#: nova/exception.py:1072
+msgid "Unable to create instance type"
+msgstr "Örnek tür oluşturulamıyor."
+
+#: nova/exception.py:1076
+#, python-format
+msgid "Failed to set admin password on %(instance)s because %(reason)s"
+msgstr ""
+
+#: nova/exception.py:1082
+msgid "Bad response from SolidFire API"
+msgstr ""
+
+#: nova/exception.py:1086
+#, python-format
+msgid "Error in SolidFire API response: data=%(data)s"
+msgstr ""
+
+#: nova/exception.py:1090
+#, python-format
+msgid "Detected existing vlan with id %(vlan)d"
+msgstr ""
+
+#: nova/exception.py:1094
+#, python-format
+msgid "Instance %(instance_id)s could not be found."
+msgstr "%(instance_id)s örneği bulunamadı."
+
+#: nova/exception.py:1098
+#, python-format
+msgid "Marker %(marker)s could not be found."
+msgstr ""
+
+#: nova/exception.py:1102
+#, python-format
+msgid "Invalid id: %(val)s (expecting \"i-...\")."
+msgstr ""
+
+#: nova/exception.py:1106
+#, python-format
+msgid "Could not fetch image %(image_id)s"
+msgstr ""
+
+#: nova/exception.py:1110
+#, python-format
+msgid "Task %(task_name)s is already running on host %(host)s"
+msgstr ""
+
+#: nova/exception.py:1114
+#, python-format
+msgid "Task %(task_name)s is not running on host %(host)s"
+msgstr ""
+
+#: nova/exception.py:1118
+#, python-format
+msgid "Instance %(instance_uuid)s is locked"
+msgstr ""
+
+#: nova/exception.py:1122
+#, python-format
+msgid ""
+"Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
+msgstr ""
+
+#: nova/exception.py:1127
+#, python-format
+msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
+msgstr ""
+
+#: nova/exception.py:1132
+#, python-format
+msgid ""
+"User data too large. User data must be no larger than %(maxsize)s bytes once"
+" base64 encoded. Your data is %(length)d bytes"
+msgstr ""
+
+#: nova/exception.py:1138
+msgid "User data needs to be valid base 64."
+msgstr ""
+
+#: nova/exception.py:1142
+#, python-format
+msgid ""
+"unexpected task state: expecting %(expected)s but the actual state is "
+"%(actual)s"
+msgstr ""
+
+#: nova/exception.py:1147
+#, python-format
+msgid "The CA file for %(project)s could not be found"
+msgstr ""
+
+#: nova/exception.py:1151
+#, python-format
+msgid "The CRL file for %(project)s could not be found"
+msgstr ""
+
+#: nova/manager.py:166
+#, python-format
+msgid ""
+"Skipping %(full_task_name)s, %(ticks_to_skip)s ticks left until next run"
+msgstr ""
+
+#: nova/manager.py:172
+#, python-format
+msgid "Running periodic task %(full_task_name)s"
+msgstr "%(full_task_name)s periyodik görevi çalışıyor"
+
+#: nova/manager.py:182
+#, python-format
+msgid "Error during %(full_task_name)s: %(e)s"
+msgstr "%(full_task_name)s sırasında hata: %(e)s"
+
+#: nova/manager.py:231
+msgid "Notifying Schedulers of capabilities ..."
+msgstr "Yetenekler zamanlayıcısı bildiriliyor ..."
+
+#: nova/notifications.py:112 nova/notifications.py:152
+msgid "Failed to send state update notification"
+msgstr ""
+
+#: nova/policy.py:32
+msgid "JSON file representing policy"
+msgstr "JSON dosyası ilkeyi temsil eder"
+
+#: nova/policy.py:35
+msgid "Rule checked when requested rule is not found"
+msgstr "İstekte bulunan kural bulunamadığında kural kontrol edildi."
+
+#: nova/quota.py:697
+#, python-format
+msgid "Created reservations %(reservations)s"
+msgstr ""
+
+#: nova/quota.py:716
+#, python-format
+msgid "Failed to commit reservations %(reservations)s"
+msgstr ""
+
+#: nova/quota.py:734
+#, python-format
+msgid "Failed to roll back reservations %(reservations)s"
+msgstr ""
+
+#: nova/service.py:179
+msgid "Full set of FLAGS:"
+msgstr ""
+
+#: nova/service.py:186
+#, python-format
+msgid "%(flag)s : FLAG SET "
+msgstr "%(flag)s : FLAG SET"
+
+#: nova/service.py:196 nova/service.py:294
+#, python-format
+msgid "Caught %s, exiting"
+msgstr ""
+
+#: nova/service.py:240
+msgid "Parent process has died unexpectedly, exiting"
+msgstr ""
+
+#: nova/service.py:276
+msgid "Forking too fast, sleeping"
+msgstr ""
+
+#: nova/service.py:299
+msgid "Unhandled exception"
+msgstr ""
+
+#: nova/service.py:306
+#, python-format
+msgid "Started child %d"
+msgstr ""
+
+#: nova/service.py:316
+#, python-format
+msgid "Starting %d workers"
+msgstr ""
+
+#: nova/service.py:330
+#, python-format
+msgid "Child %(pid)d killed by signal %(sig)d"
+msgstr ""
+
+#: nova/service.py:333
+#, python-format
+msgid "Child %(pid)d exited with status %(code)d"
+msgstr ""
+
+#: nova/service.py:336
+#, python-format
+msgid "pid %d not in child list"
+msgstr ""
+
+#: nova/service.py:356
+#, python-format
+msgid "Caught %s, stopping children"
+msgstr ""
+
+#: nova/service.py:367
+#, python-format
+msgid "Waiting on %d children to exit"
+msgstr ""
+
+#: nova/service.py:396
+#, python-format
+msgid "Starting %(topic)s node (version %(vcs_string)s)"
+msgstr ""
+
+#: nova/service.py:413
+#, python-format
+msgid "Creating Consumer connection for Service %s"
+msgstr "%s servisi için tüketici bağlantısı oluşturuluyor."
+
+#: nova/service.py:503
+msgid "Service killed that has no database entry"
+msgstr "Servis, hiç veritabanı girdisi olmayanı sonlandırdı."
+
+#: nova/service.py:540
+msgid "The service database object disappeared, Recreating it."
+msgstr ""
+
+#: nova/service.py:555
+msgid "Recovered model server connection!"
+msgstr "Kurtarılmış model sunucu bağlantısı!"
+
+#: nova/service.py:561
+msgid "model server went away"
+msgstr "Model sunucusu gitti."
+
+#: nova/service.py:649
+msgid "serve() can only be called once"
+msgstr ""
+
+#: nova/utils.py:160
+#, python-format
+msgid "Got unknown keyword args to utils.execute: %r"
+msgstr "utils.execute için bilinmeyen anahtar kelime argümanları: %r"
+
+#: nova/utils.py:171
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr "Çalışan komut(alt süreç): %s"
+
+#: nova/utils.py:187 nova/utils.py:265 nova/virt/powervm/common.py:82
+#, python-format
+msgid "Result was %s"
+msgstr "Sonuç %s"
+
+#: nova/utils.py:200
+#, python-format
+msgid "%r failed. Retrying."
+msgstr "%r hatalı. Yeniden deneniyor."
+
+#: nova/utils.py:240
+#, python-format
+msgid "Running cmd (SSH): %s"
+msgstr "Çalışan komut(SSH): %s"
+
+#: nova/utils.py:242
+msgid "Environment not supported over SSH"
+msgstr "SSH üzerinde ortam desteklenmemektedir."
+
+#: nova/utils.py:246
+msgid "process_input not supported over SSH"
+msgstr "SSH üzerinde process_input desteklenmemektedir."
+
+#: nova/utils.py:281
+#, python-format
+msgid "debug in callback: %s"
+msgstr ""
+
+#: nova/utils.py:440
+#, python-format
+msgid "Link Local address is not found.:%s"
+msgstr "Yerel adres bağlantısı bulunamadı.:%s"
+
+#: nova/utils.py:443
+#, python-format
+msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
+msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s"
+
+#: nova/utils.py:478
+#, python-format
+msgid "Invalid backend: %s"
+msgstr "Geçersiz backend: %s"
+
+#: nova/utils.py:539
+msgid "in looping call"
+msgstr ""
+
+#: nova/utils.py:700
+#, python-format
+msgid "Expected object of type: %s"
+msgstr "Beklenen nesne türü: %s"
+
+#: nova/utils.py:729
+#, python-format
+msgid "Invalid server_string: %s"
+msgstr "Geçersiz server_string: %s"
+
+#: nova/utils.py:869
+#, python-format
+msgid "timefunc: '%(name)s' took %(total_time).2f secs"
+msgstr "Zaman Fonksiyonu: %(name)s %(total_time).2f saniye sürdü"
+
+#: nova/utils.py:947
+#, python-format
+msgid "Reloading cached file %s"
+msgstr ""
+
+#: nova/utils.py:1065 nova/virt/configdrive.py:151
+#, python-format
+msgid "Could not remove tmpdir: %s"
+msgstr ""
+
+#: nova/wsgi.py:85
+#, python-format
+msgid "%(name)s listening on %(host)s:%(port)s"
+msgstr ""
+
+#: nova/wsgi.py:109
+msgid "Stopping WSGI server."
+msgstr "WSGI sunucusu durduruluyor."
+
+#: nova/wsgi.py:127
+msgid "WSGI server has stopped."
+msgstr "WSGI sunucusu durduruldu."
+
+#: nova/wsgi.py:196
+msgid "You must implement __call__"
+msgstr "__call__ fonksiyonunu uygulamalısınız."
+
+#: nova/wsgi.py:382
+#, python-format
+msgid "Loading app %(name)s from %(path)s"
+msgstr ""
+
+#: nova/api/auth.py:108
+msgid "Invalid service catalog json."
+msgstr ""
+
+#: nova/api/auth.py:131
+msgid "Sourcing roles from deprecated X-Role HTTP header"
+msgstr ""
+
+#: nova/api/sizelimit.py:51
+msgid "Request is too large."
+msgstr ""
+
+#: nova/api/validator.py:138
+#, python-format
+msgid "%(key)s with value %(value)s failed validator %(name)s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:79
+#, python-format
+msgid "%(code)s: %(message)s"
+msgstr "%(code)s: %(message)s"
+
+#: nova/api/ec2/__init__.py:102
+#, python-format
+msgid "FaultWrapper: %s"
+msgstr "Hata yakalayıcı: %s"
+
+#: nova/api/ec2/__init__.py:177
+msgid "Too many failed authentications."
+msgstr "Çok sayıda kimlik doğrulama başarısız oldu."
+
+#: nova/api/ec2/__init__.py:187
+#, python-format
+msgid ""
+"Access key %(access_key)s has had %(failures)d failed authentications and "
+"will be locked out for %(lock_mins)d minutes."
+msgstr "%(access_key)s erişim anahtarı %(failures)d kez yanlış girildi ve %(lock_mins)d dakika boyunca kilitlenmiş olacak."
+
+#: nova/api/ec2/__init__.py:204
+msgid "Signature not provided"
+msgstr "İmza verilmedi."
+
+#: nova/api/ec2/__init__.py:208
+msgid "Access key not provided"
+msgstr "Erişim anahtarı verilmedi."
+
+#: nova/api/ec2/__init__.py:243 nova/api/ec2/__init__.py:258
+msgid "Failure communicating with keystone"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:328
+#, python-format
+msgid "action: %s"
+msgstr "işlem: %s"
+
+#: nova/api/ec2/__init__.py:330
+#, python-format
+msgid "arg: %(key)s\t\tval: %(value)s"
+msgstr "arg: %(key)s»»val: %(value)s"
+
+#: nova/api/ec2/__init__.py:405
+#, python-format
+msgid ""
+"Unauthorized request for controller=%(controller)s and action=%(action)s"
+msgstr "%(controller)s kontrolcüsü ve %(action)s işlemi için izinsiz istek"
+
+#: nova/api/ec2/__init__.py:477
+#, python-format
+msgid "InstanceNotFound raised: %s"
+msgstr "ÖrnekBulunamadı hatası: %s"
+
+#: nova/api/ec2/__init__.py:483
+#, python-format
+msgid "VolumeNotFound raised: %s"
+msgstr "BölümBulunamadı hatası: %s"
+
+#: nova/api/ec2/__init__.py:489
+#, python-format
+msgid "SnapshotNotFound raised: %s"
+msgstr "SistemAnlıkGörüntüsüBulunamadı hatası: %s"
+
+#: nova/api/ec2/__init__.py:495
+#, python-format
+msgid "NotFound raised: %s"
+msgstr "Bulunamadı hatası: %s"
+
+#: nova/api/ec2/__init__.py:498
+#, python-format
+msgid "EC2APIError raised: %s"
+msgstr "EC2API hatası: %s"
+
+#: nova/api/ec2/__init__.py:506
+#, python-format
+msgid "KeyPairExists raised: %s"
+msgstr "AnahtarÇiftiZatenVar hatası: %s"
+
+#: nova/api/ec2/__init__.py:510
+#, python-format
+msgid "InvalidParameterValue raised: %s"
+msgstr "GeçersizParametreDeğeri hatası: %s"
+
+#: nova/api/ec2/__init__.py:514
+#, python-format
+msgid "InvalidPortRange raised: %s"
+msgstr "GeçersizPortGenişliği hatası: %s"
+
+#: nova/api/ec2/__init__.py:518
+#, python-format
+msgid "NotAuthorized raised: %s"
+msgstr "KimlikDoğrulama hatası: %s"
+
+#: nova/api/ec2/__init__.py:522
+#, python-format
+msgid "InvalidRequest raised: %s"
+msgstr "Geçersizİstek hatası: %s"
+
+#: nova/api/ec2/__init__.py:526
+#, python-format
+msgid "QuotaError raised: %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:530
+#, python-format
+msgid "Invalid id: bogus (expecting \"i-...\"): %s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:539
+#, python-format
+msgid "Unexpected error raised: %s"
+msgstr "BeklenmeyenHata: %s"
+
+#: nova/api/ec2/__init__.py:540
+#, python-format
+msgid "Environment: %s"
+msgstr "Ortam: %s"
+
+#: nova/api/ec2/__init__.py:542 nova/api/metadata/handler.py:79
+msgid "An unknown error has occurred. Please try your request again."
+msgstr "Bilinmeyen bir hata oluştu. Lütfen tekrar deneyin."
+
+#: nova/api/ec2/apirequest.py:64
+#, python-format
+msgid ""
+"Unsupported API request: controller = %(controller)s, action = %(action)s"
+msgstr "Desteklenmeyen API isteği: kontrolcü = %(controller)s, işlem = %(action)s"
+
+#: nova/api/ec2/cloud.py:337
+#, python-format
+msgid "Create snapshot of volume %s"
+msgstr "Bölüm %s'in sistem anlık görüntüsünü oluştur"
+
+#: nova/api/ec2/cloud.py:363
+#, python-format
+msgid "Could not find key pair(s): %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:379
+#, python-format
+msgid "Create key pair %s"
+msgstr "%s anahtar çiftini oluştur"
+
+#: nova/api/ec2/cloud.py:386 nova/api/ec2/cloud.py:411
+#: nova/api/openstack/compute/contrib/keypairs.py:93
+msgid "Quota exceeded, too many key pairs."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:392 nova/api/ec2/cloud.py:417
+#: nova/api/openstack/compute/contrib/keypairs.py:101
+#, python-format
+msgid "Key pair '%s' already exists."
+msgstr "'%s' anahtar çifti zaten var."
+
+#: nova/api/ec2/cloud.py:401
+#, python-format
+msgid "Import key %s"
+msgstr "%s anahtarını içeriye aktar"
+
+#: nova/api/ec2/cloud.py:424
+#, python-format
+msgid "Delete key pair %s"
+msgstr "%s anahtar çiftini sil"
+
+#: nova/api/ec2/cloud.py:558 nova/api/ec2/cloud.py:679
+msgid "Not enough parameters, need group_name or group_id"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:563
+#, python-format
+msgid "%s Not enough parameters to build a valid rule"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:633
+msgid "No rule for the specified parameters."
+msgstr "Belirlenmiş parametreler için hiç kural yok"
+
+#: nova/api/ec2/cloud.py:624
+#, python-format
+msgid "%s - This rule already exists in group"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:690
+#, python-format
+msgid "Get console output for instance %s"
+msgstr "%s örneği için konsol çıktısını getir"
+
+#: nova/api/ec2/cloud.py:766
+#, python-format
+msgid "Create volume from snapshot %s"
+msgstr "%s sistem görüntüsünden birim oluştur"
+
+#: nova/api/ec2/cloud.py:770 nova/api/openstack/compute/contrib/volumes.py:250
+#: nova/api/openstack/volume/volumes.py:310
+#, python-format
+msgid "Create volume of %s GB"
+msgstr "%s GB'lık birim oluştur"
+
+#: nova/api/ec2/cloud.py:798
+msgid "Delete Failed"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:811
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
+msgstr "%(device)s'daki %(instance_id)s örneğine %(volume_id)s birimini bağla"
+
+#: nova/api/ec2/cloud.py:819
+msgid "Attach Failed."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:832 nova/api/openstack/compute/contrib/volumes.py:429
+#, python-format
+msgid "Detach volume %s"
+msgstr "Birimi ayır %s"
+
+#: nova/api/ec2/cloud.py:838
+msgid "Detach Volume Failed."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:864 nova/api/ec2/cloud.py:921
+#: nova/api/ec2/cloud.py:1458 nova/api/ec2/cloud.py:1473
+#, python-format
+msgid "attribute not supported: %s"
+msgstr "Özellik desteklenmiyor: %s"
+
+#: nova/api/ec2/cloud.py:987
+#, python-format
+msgid "vol = %s\n"
+msgstr "birim = %s\\n"
+
+#: nova/api/ec2/cloud.py:1138
+msgid "Allocate address"
+msgstr "Adres tahsisi"
+
+#: nova/api/ec2/cloud.py:1142
+msgid "No more floating IPs available"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1146
+#, python-format
+msgid "Release address %s"
+msgstr "%s adresini serbest bırak"
+
+#: nova/api/ec2/cloud.py:1151
+msgid "Unable to release IP Address."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1154
+#, python-format
+msgid "Associate address %(public_ip)s to instance %(instance_id)s"
+msgstr "%(instance_id)s örneğine %(public_ip)s adresini ilişkilendir"
+
+#: nova/api/ec2/cloud.py:1162
+msgid "Unable to associate IP Address, no fixed_ips."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1170
+#: nova/api/openstack/compute/contrib/floating_ips.py:257
+#, python-format
+msgid "multiple fixed_ips exist, using the first: %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1179
+msgid "Floating ip is already associated."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1182
+msgid "l3driver call to add floating ip failed."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1185
+msgid "Error, unable to associate floating ip."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1193
+#, python-format
+msgid "Disassociate address %s"
+msgstr "Adresi kes %s"
+
+#: nova/api/ec2/cloud.py:1198
+msgid "Floating ip is not associated."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1201
+#: nova/api/openstack/compute/contrib/floating_ips.py:100
+msgid "Cannot disassociate auto assigned floating ip"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1228
+msgid "Image must be available"
+msgstr "İmaj müsait olmak zorunda"
+
+#: nova/api/ec2/cloud.py:1260
+msgid "Going to start terminating instances"
+msgstr "Örnekleri sonlandırma işlemi başlatılıyor"
+
+#: nova/api/ec2/cloud.py:1270
+#, python-format
+msgid "Reboot instance %r"
+msgstr "%r örneğini tekrar yükle"
+
+#: nova/api/ec2/cloud.py:1279
+msgid "Going to stop instances"
+msgstr "Örnekler durdurulacak"
+
+#: nova/api/ec2/cloud.py:1288
+msgid "Going to start instances"
+msgstr "Örnekler başlatılacak"
+
+#: nova/api/ec2/cloud.py:1379
+#, python-format
+msgid "De-registering image %s"
+msgstr "%s imaj kaydı siliniyor"
+
+#: nova/api/ec2/cloud.py:1395
+msgid "imageLocation is required"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1414
+#, python-format
+msgid "Registered image %(image_location)s with id %(image_id)s"
+msgstr "%(image_id)s id ile %(image_location)s imajı kaydedildi"
+
+#: nova/api/ec2/cloud.py:1476
+msgid "user or group not specified"
+msgstr "Kullanıcı veya grup belirlenmedi"
+
+#: nova/api/ec2/cloud.py:1478
+msgid "only group \"all\" is supported"
+msgstr "Sadece \"all\" grubu destekleniyor"
+
+#: nova/api/ec2/cloud.py:1480
+msgid "operation_type must be add or remove"
+msgstr "İşlem türü eklenmek veya kaldırılmak zorunda"
+
+#: nova/api/ec2/cloud.py:1482
+#, python-format
+msgid "Updating image %s publicity"
+msgstr "%s imaj tanıtımı güncelleniyor"
+
+#: nova/api/ec2/cloud.py:1495
+#, python-format
+msgid "Not allowed to modify attributes for image %s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1524
+#, python-format
+msgid ""
+"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a"
+" volume attached at root (%(root)s)"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1554
+#, python-format
+msgid "Couldn't stop instance with in %d sec"
+msgstr "%d saniye içinde örnek durdurulamadı"
+
+#: nova/api/ec2/cloud.py:1572
+#, python-format
+msgid "image of %(instance)s at %(now)s"
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1605
+msgid "Invalid CIDR"
+msgstr ""
+
+#: nova/api/metadata/handler.py:77 nova/api/metadata/handler.py:84
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr "IP için metadata alma hata ile sonuçlandı: %s"
+
+#: nova/api/openstack/__init__.py:51
+#, python-format
+msgid "Caught error: %s"
+msgstr "Yakalama hatası: %s"
+
+#: nova/api/openstack/__init__.py:60 nova/api/openstack/wsgi.py:986
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s HTTP %(status)d ile geri döndü"
+
+#: nova/api/openstack/__init__.py:126
+msgid "Must specify an ExtensionManager class"
+msgstr "UzantıYöneticisi sınıfı belirlenmek zorunda"
+
+#: nova/api/openstack/__init__.py:137
+#, python-format
+msgid "Extended resource: %s"
+msgstr "Genişletilmiş kaynak: %s"
+
+#: nova/api/openstack/__init__.py:171
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr "%(ext_name)s uzantısı: %(collection)s kaynağı genişletilemiyor: Böyle bir kaynak yok"
+
+#: nova/api/openstack/__init__.py:176
+#, python-format
+msgid "Extension %(ext_name)s extending resource: %(collection)s"
+msgstr "%(ext_name)s uzantısı kaynağı genişletiyor: %(collection)s"
+
+#: nova/api/openstack/common.py:99
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad "
+"upgrade or db corrupted?"
+msgstr ""
+
+#: nova/api/openstack/common.py:138 nova/api/openstack/common.py:172
+msgid "limit param must be an integer"
+msgstr "Sınır parametresi tam sayı olmak zorunda"
+
+#: nova/api/openstack/common.py:141 nova/api/openstack/common.py:176
+msgid "limit param must be positive"
+msgstr "Sınır parametresi pozitif olmak zorunda"
+
+#: nova/api/openstack/common.py:166
+msgid "offset param must be an integer"
+msgstr "Göreli konum parametresi tam sayı olmak zorunda"
+
+#: nova/api/openstack/common.py:180
+msgid "offset param must be positive"
+msgstr "Göreli konum parametresi pozitif olmak zorunda"
+
+#: nova/api/openstack/common.py:215 nova/api/openstack/compute/servers.py:536
+#, python-format
+msgid "marker [%s] not found"
+msgstr " [%s] göstergesi bulunamadı"
+
+#: nova/api/openstack/common.py:255
+#, python-format
+msgid "href %s does not contain version"
+msgstr "%s referansı versiyon içermiyor"
+
+#: nova/api/openstack/common.py:270
+msgid "Image metadata limit exceeded"
+msgstr "İmaj üstveri sınırı aşıldı"
+
+#: nova/api/openstack/common.py:278
+msgid "Image metadata key cannot be blank"
+msgstr ""
+
+#: nova/api/openstack/common.py:281
+msgid "Image metadata key too long"
+msgstr ""
+
+#: nova/api/openstack/common.py:284
+msgid "Invalid image metadata"
+msgstr ""
+
+#: nova/api/openstack/common.py:335
+#, python-format
+msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
+msgstr "Örnek %(attr)s %(state)s'de iken '%(action)s' yapılamaz"
+
+#: nova/api/openstack/common.py:338
+#, python-format
+msgid "Instance is in an invalid state for '%(action)s'"
+msgstr "Örnek '%(action)s' için geçersiz durumda"
+
+#: nova/api/openstack/common.py:418
+msgid "Rejecting snapshot request, snapshots currently disabled"
+msgstr "Sistem görüntüsü isteği reddedildi, sistem görüntüleri şu anda devre dışı."
+
+#: nova/api/openstack/common.py:420
+msgid "Instance snapshots are not permitted at this time."
+msgstr "Örnek sistem görüntüleri şu an onaylanmadı."
+
+#: nova/api/openstack/extensions.py:201
+#, python-format
+msgid "Loaded extension: %s"
+msgstr "Yüklenen bölüm: %s"
+
+#: nova/api/openstack/extensions.py:240
+#, python-format
+msgid "Ext name: %s"
+msgstr "Ext adı: %s"
+
+#: nova/api/openstack/extensions.py:241
+#, python-format
+msgid "Ext alias: %s"
+msgstr "Ext takma adı: %s"
+
+#: nova/api/openstack/extensions.py:242
+#, python-format
+msgid "Ext description: %s"
+msgstr "Ext açıklaması: %s"
+
+#: nova/api/openstack/extensions.py:244
+#, python-format
+msgid "Ext namespace: %s"
+msgstr "Ext isim uzayı: %s"
+
+#: nova/api/openstack/extensions.py:245
+#, python-format
+msgid "Ext updated: %s"
+msgstr "Ext güncellendi: %s"
+
+#: nova/api/openstack/extensions.py:247
+#, python-format
+msgid "Exception loading extension: %s"
+msgstr "İstisna yükleme uzantısı: %s"
+
+#: nova/api/openstack/extensions.py:261
+#, python-format
+msgid "Loading extension %s"
+msgstr "Genişletme yükleniyor: %s"
+
+#: nova/api/openstack/extensions.py:270
+#, python-format
+msgid "Calling extension factory %s"
+msgstr "Genişletme fabrikası çağırılıyor %s"
+
+#: nova/api/openstack/extensions.py:282
+#, python-format
+msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgstr "%(ext_factory)s uzantısı yüklemede hata: %(exc)s"
+
+#: nova/api/openstack/extensions.py:363
+#, python-format
+msgid "Failed to load extension %(classpath)s: %(exc)s"
+msgstr "%(classpath)s uzantısı yüklemede hata: %(exc)s"
+
+#: nova/api/openstack/extensions.py:387
+#, python-format
+msgid "Failed to load extension %(ext_name)s: %(exc)s"
+msgstr "%(ext_name)s uzantısı yüklemede hata: %(exc)s"
+
+#: nova/api/openstack/wsgi.py:199 nova/api/openstack/wsgi.py:619
+msgid "cannot understand JSON"
+msgstr "JSON dosyası anlaşılamadı"
+
+#: nova/api/openstack/wsgi.py:223
+#: nova/api/openstack/compute/contrib/hosts.py:85
+msgid "cannot understand XML"
+msgstr "XML dosyası anlaşılamadı"
+
+#: nova/api/openstack/wsgi.py:624
+msgid "too many body keys"
+msgstr "Çok sayıda gövde anahtarları"
+
+#: nova/api/openstack/wsgi.py:667
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:671
+#, python-format
+msgid "Fault thrown: %s"
+msgstr "Hata fırlatıldı: %s"
+
+#: nova/api/openstack/wsgi.py:674
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr "HTTP istisnası fırlatıldı: %s"
+
+#: nova/api/openstack/wsgi.py:788
+msgid "Unrecognized Content-Type provided in request"
+msgstr "İstekte tanınamayan içerik türü sağlayıcısı"
+
+#: nova/api/openstack/wsgi.py:792
+msgid "No Content-Type provided in request"
+msgstr "İstekte içerik türü sağlayıcısı yok"
+
+#: nova/api/openstack/wsgi.py:796
+msgid "Empty body provided in request"
+msgstr "İstekteki sağlayıcı gövdesi boş"
+
+#: nova/api/openstack/wsgi.py:897
+msgid "Invalid XML in request body"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:916
+#, python-format
+msgid "There is no such action: %s"
+msgstr "Böyle bir işlem yok: %s"
+
+#: nova/api/openstack/wsgi.py:919 nova/api/openstack/wsgi.py:932
+#: nova/api/openstack/compute/server_metadata.py:58
+#: nova/api/openstack/compute/server_metadata.py:76
+#: nova/api/openstack/compute/server_metadata.py:101
+#: nova/api/openstack/compute/server_metadata.py:126
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+msgid "Malformed request body"
+msgstr "Kusurlu istek gövdesi"
+
+#: nova/api/openstack/wsgi.py:929
+msgid "Unsupported Content-Type"
+msgstr "Desteklenmeyen içerik türü"
+
+#: nova/api/openstack/wsgi.py:941
+msgid "Malformed request url"
+msgstr "Hatalı istek bağlantısı"
+
+#: nova/api/openstack/wsgi.py:989
+#, python-format
+msgid "%(url)s returned a fault: %(e)s"
+msgstr "%(url)s hata döndürdü: %(e)s"
+
+#: nova/api/openstack/xmlutil.py:265
+msgid "element is not a child"
+msgstr "eleman çocuk değil"
+
+#: nova/api/openstack/xmlutil.py:414
+msgid "root element selecting a list"
+msgstr "listeden kök elemanı seçiliyor"
+
+#: nova/api/openstack/xmlutil.py:739
+#, python-format
+msgid ""
+"Template tree mismatch; adding slave %(slavetag)s to master %(mastertag)s"
+msgstr "Şablon ağacı uyumsuz; %(mastertag)s ana kaydına %(slavetag)s bağımlı birimi ekleniyor"
+
+#: nova/api/openstack/xmlutil.py:858
+msgid "subclasses must implement construct()!"
+msgstr "alt sınıflar yapıcı() oluşturmak zorunda!"
+
+#: nova/api/openstack/compute/extensions.py:30
+#: nova/api/openstack/volume/extensions.py:29
+msgid "Initializing extension manager."
+msgstr "Genişletme yöneticisi başlatılıyor"
+
+#: nova/api/openstack/compute/flavors.py:111
+#, python-format
+msgid "Invalid is_public filter [%s]"
+msgstr ""
+
+#: nova/api/openstack/compute/flavors.py:130
+#, python-format
+msgid "Invalid minRam filter [%s]"
+msgstr ""
+
+#: nova/api/openstack/compute/flavors.py:137
+#, python-format
+msgid "Invalid minDisk filter [%s]"
+msgstr ""
+
+#: nova/api/openstack/compute/image_metadata.py:40
+#: nova/api/openstack/compute/images.py:146
+#: nova/api/openstack/compute/images.py:161
+msgid "Image not found."
+msgstr "İmaj bulunamadı"
+
+#: nova/api/openstack/compute/image_metadata.py:80
+msgid "Incorrect request body format"
+msgstr "Geçersiz gövde biçimi isteği."
+
+#: nova/api/openstack/compute/image_metadata.py:84
+#: nova/api/openstack/compute/server_metadata.py:80
+#: nova/api/openstack/compute/contrib/flavorextraspecs.py:79
+#: nova/api/openstack/compute/contrib/volumetypes.py:174
+#: nova/api/openstack/volume/contrib/types_extra_specs.py:101
+msgid "Request body and URI mismatch"
+msgstr "URI ve gövde isteği uyumsuz"
+
+#: nova/api/openstack/compute/image_metadata.py:87
+#: nova/api/openstack/compute/server_metadata.py:84
+#: nova/api/openstack/compute/contrib/flavorextraspecs.py:82
+#: nova/api/openstack/compute/contrib/volumetypes.py:177
+#: nova/api/openstack/volume/contrib/types_extra_specs.py:104
+msgid "Request body contains too many items"
+msgstr "İstek gövdesi çok sayıda öğe içeriyor"
+
+#: nova/api/openstack/compute/image_metadata.py:113
+msgid "Invalid metadata key"
+msgstr "Geçersiz özellik anahtarı"
+
+#: nova/api/openstack/compute/ips.py:74
+msgid "Instance does not exist"
+msgstr "Örnek mevcut değil"
+
+#: nova/api/openstack/compute/ips.py:97
+msgid "Instance is not a member of specified network"
+msgstr "Örnek belirlenmiş ağın bir üyesi değil"
+
+#: nova/api/openstack/compute/limits.py:145
+#, python-format
+msgid ""
+"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
+"%(unit_string)s."
+msgstr ""
+
+#: nova/api/openstack/compute/limits.py:271
+msgid "This request was rate-limited."
+msgstr "Bu istek sayı limitlidir."
+
+#: nova/api/openstack/compute/server_metadata.py:38
+#: nova/api/openstack/compute/server_metadata.py:122
+#: nova/api/openstack/compute/server_metadata.py:166
+msgid "Server does not exist"
+msgstr "Sunucu mevcut değil"
+
+#: nova/api/openstack/compute/server_metadata.py:148
+#: nova/api/openstack/compute/server_metadata.py:159
+msgid "Metadata item was not found"
+msgstr "İçerik özelliği bilgisi bulunamadı"
+
+#: nova/api/openstack/compute/servers.py:445
+#: nova/api/openstack/compute/servers.py:457
+#: nova/api/openstack/compute/servers.py:552
+#: nova/api/openstack/compute/servers.py:720
+#: nova/api/openstack/compute/servers.py:981
+#: nova/api/openstack/compute/servers.py:1084
+#: nova/api/openstack/compute/servers.py:1234
+msgid "Instance could not be found"
+msgstr "Örnek bulunamadı."
+
+#: nova/api/openstack/compute/servers.py:496
+msgid "Invalid changes-since value"
+msgstr "Geçersiz değişiklik-oluşturma değeri"
+
+#: nova/api/openstack/compute/servers.py:515
+msgid "Only administrators may list deleted instances"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:559
+msgid "Server name is not a string or unicode"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:563
+msgid "Server name is an empty string"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:567
+msgid "Server name must be less than 256 characters."
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:584
+#, python-format
+msgid "Bad personality format: missing %s"
+msgstr "Yanlış kişisel biçim: %s bulunamadı"
+
+#: nova/api/openstack/compute/servers.py:587
+msgid "Bad personality format"
+msgstr "Yanlış kişisel biçim"
+
+#: nova/api/openstack/compute/servers.py:591
+#, python-format
+msgid "Personality content for %s cannot be decoded"
+msgstr "%s için kişisel içerik çözümlenemedi"
+
+#: nova/api/openstack/compute/servers.py:622
+msgid "Unknown argment : port"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:625
+#, python-format
+msgid "Bad port format: port uuid is not in proper format (%s)"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:635
+#, python-format
+msgid "Bad networks format: network uuid is not in proper format (%s)"
+msgstr "Yanlış ağ biçimi: ağ UUID'si uygun formatta değil(%s)"
+
+#: nova/api/openstack/compute/servers.py:645
+#, python-format
+msgid "Invalid fixed IP address (%s)"
+msgstr "Geçersiz değişken IP adresi (%s)"
+
+#: nova/api/openstack/compute/servers.py:658
+#, python-format
+msgid "Duplicate networks (%s) are not allowed"
+msgstr "Tekrarlanan ağlara (%s) izin verilmiyor"
+
+#: nova/api/openstack/compute/servers.py:664
+#, python-format
+msgid "Bad network format: missing %s"
+msgstr "Yanlış ağ biçimi: %s bulunamadı"
+
+#: nova/api/openstack/compute/servers.py:667
+msgid "Bad networks format"
+msgstr "Hatalı ağ biçimi"
+
+#: nova/api/openstack/compute/servers.py:693
+msgid "Userdata content cannot be decoded"
+msgstr "Kullanıcı verisi içeriği çözülemez"
+
+#: nova/api/openstack/compute/servers.py:700
+msgid "accessIPv4 is not proper IPv4 format"
+msgstr "IPv4 adresi uygun IPv4 formatında değil"
+
+#: nova/api/openstack/compute/servers.py:707
+msgid "accessIPv6 is not proper IPv6 format"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:736
+msgid "Server name is not defined"
+msgstr "Sunucu adı tanımlanmadı"
+
+#: nova/api/openstack/compute/servers.py:785
+#: nova/api/openstack/compute/servers.py:891
+msgid "Invalid flavorRef provided."
+msgstr "Geçersiz örnek türü sağlayıcısı"
+
+#: nova/api/openstack/compute/servers.py:825
+msgid "min_count must be an integer value"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:828
+msgid "min_count must be > 0"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:833
+msgid "max_count must be an integer value"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:836
+msgid "max_count must be > 0"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:839
+msgid "min_count must be <= max_count"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:888
+msgid "Can not find requested image"
+msgstr "İstenilen imaj dosyası bulunamadı"
+
+#: nova/api/openstack/compute/servers.py:894
+msgid "Invalid key_name provided."
+msgstr "Geçersiz anahtar adı verildi."
+
+#: nova/api/openstack/compute/servers.py:973
+msgid "HostId cannot be updated."
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:999
+#: nova/api/openstack/compute/servers.py:1019
+msgid "Instance has not been resized."
+msgstr "Örnek tekrar boyutlandırılacak şekilde ayarlanmadı."
+
+#: nova/api/openstack/compute/servers.py:1005
+#, python-format
+msgid "Error in confirm-resize %s"
+msgstr "Yeniden boyutlandırma onayında hata %s"
+
+#: nova/api/openstack/compute/servers.py:1025
+#, python-format
+msgid "Error in revert-resize %s"
+msgstr "Yeniden boyutlandırma dönüşünde hata %s"
+
+#: nova/api/openstack/compute/servers.py:1038
+msgid "Argument 'type' for reboot is not HARD or SOFT"
+msgstr "Önyükleme için argüman türü HARD veya SOFT değil"
+
+#: nova/api/openstack/compute/servers.py:1042
+msgid "Missing argument 'type' for reboot"
+msgstr "Önyükleme için tür argümanı eksik"
+
+#: nova/api/openstack/compute/servers.py:1055
+#, python-format
+msgid "Error in reboot %s"
+msgstr "Önyükleme sırasında hata %s"
+
+#: nova/api/openstack/compute/servers.py:1067
+msgid "Unable to locate requested flavor."
+msgstr "İstenilen örnek türü konumlandırılamıyor."
+
+#: nova/api/openstack/compute/servers.py:1070
+msgid "Resize requires a flavor change."
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:1094
+msgid "Missing imageRef attribute"
+msgstr "İmaj referans özelliği eksik"
+
+#: nova/api/openstack/compute/servers.py:1103
+#: nova/api/openstack/volume/volumes.py:263
+#: nova/api/openstack/volume/volumes.py:267
+msgid "Invalid imageRef provided."
+msgstr "Geçersiz imaj referansı verildi."
+
+#: nova/api/openstack/compute/servers.py:1112
+msgid "Missing flavorRef attribute"
+msgstr "Örnek türü referans özelliği eksik"
+
+#: nova/api/openstack/compute/servers.py:1125
+msgid "No adminPass was specified"
+msgstr "Belirlenmiş bir yönetici parolası yok"
+
+#: nova/api/openstack/compute/servers.py:1129
+#: nova/api/openstack/compute/servers.py:1331
+msgid "Invalid adminPass"
+msgstr "Geçersiz yönetici parolası"
+
+#: nova/api/openstack/compute/servers.py:1140
+msgid "Unable to parse metadata key/value pairs."
+msgstr "Çözümlenemeyen metadata anahtar/değer çifti."
+
+#: nova/api/openstack/compute/servers.py:1153
+msgid "Resize request has invalid 'flavorRef' attribute."
+msgstr "Yeniden boyutlandırma isteği geçersiz örnek türü referansı özelliğine sahip."
+
+#: nova/api/openstack/compute/servers.py:1156
+msgid "Resize requests require 'flavorRef' attribute."
+msgstr "Yeniden boyutlandırma isteği geçersiz örnek türü referansı özelliği gerektirir."
+
+#: nova/api/openstack/compute/servers.py:1174
+#: nova/api/openstack/compute/contrib/aggregates.py:142
+#: nova/api/openstack/compute/contrib/keypairs.py:78
+#: nova/api/openstack/compute/contrib/networks.py:75
+#: nova/api/openstack/volume/contrib/volume_actions.py:86
+msgid "Invalid request body"
+msgstr "Geçersiz istek gövdesi"
+
+#: nova/api/openstack/compute/servers.py:1179
+msgid "Could not parse imageRef from request."
+msgstr "İstekte bulunulan imaj referansı çözümlenemedi."
+
+#: nova/api/openstack/compute/servers.py:1241
+msgid "Cannot find image for rebuild"
+msgstr "Yeniden kurulum için imaj dosyası bulunamadı."
+
+#: nova/api/openstack/compute/servers.py:1274
+msgid "createImage entity requires name attribute"
+msgstr "createImage varlığının isim özelliğine ihtiyacı var"
+
+#: nova/api/openstack/compute/servers.py:1358
+#, python-format
+msgid "Removing options '%(unk_opt_str)s' from query"
+msgstr "Sorgudan '%(unk_opt_str)s' seçenekleri kaldırılıyor"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:64
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:81
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:98
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:115
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:131
+#, python-format
+msgid "Error in migrate %s"
+msgstr "Göçte hata %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:145
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:158
+#: nova/api/openstack/compute/contrib/admin_actions.py:174
+#: nova/api/openstack/compute/contrib/admin_actions.py:190
+#: nova/api/openstack/compute/contrib/admin_actions.py:312
+#: nova/api/openstack/compute/contrib/multinic.py:41
+#: nova/api/openstack/compute/contrib/rescue.py:44
+msgid "Server not found"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:161
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:177
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:193
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:223
+#, python-format
+msgid "createBackup entity requires %s attribute"
+msgstr "yedekOluşturma birimi %s özelliğini gerektiriyor"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:227
+msgid "Malformed createBackup entity"
+msgstr "Bozuk yedekOluşturma birimi"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:233
+msgid "createBackup attribute 'rotation' must be an integer"
+msgstr "yedekOluşturma 'dönüşme' özelliği tamsayı olmak zorunda"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:248
+#: nova/api/openstack/compute/contrib/console_output.py:47
+#: nova/api/openstack/compute/contrib/server_diagnostics.py:47
+#: nova/api/openstack/compute/contrib/server_start_stop.py:38
+msgid "Instance not found"
+msgstr "Örnek bulunamadı"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:276
+msgid "host and block_migration must be specified."
+msgstr "Sunucu ve blok_göçü tanımlanmak zorunda."
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:284
+#, python-format
+msgid "Live migration of instance %(id)s to host %(host)s failed"
+msgstr "%(id)s'dan %(host)s sunucusuna örnek göçü hatalı"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:302
+#, python-format
+msgid "Desired state must be specified. Valid states are: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:315
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/aggregates.py:76
+#, python-format
+msgid ""
+"Cannot create aggregate with name %(name)s and availability zone "
+"%(avail_zone)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/aggregates.py:88
+#, python-format
+msgid "Cannot show aggregate: %(id)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/aggregates.py:114
+#, python-format
+msgid "Cannot update aggregate: %(id)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/aggregates.py:126
+#, python-format
+msgid "Cannot delete aggregate: %(id)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/aggregates.py:139
+#, python-format
+msgid "Aggregates does not have %s action"
+msgstr "Kümeler %s işlemine sahip değil"
+
+#: nova/api/openstack/compute/contrib/aggregates.py:152
+#: nova/api/openstack/compute/contrib/aggregates.py:157
+#, python-format
+msgid "Cannot add host %(host)s in aggregate %(id)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/aggregates.py:170
+#: nova/api/openstack/compute/contrib/aggregates.py:174
+#, python-format
+msgid "Cannot remove host %(host)s in aggregate %(id)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/aggregates.py:194
+#, python-format
+msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/certificates.py:76
+msgid "Only root certificate can be retrieved."
+msgstr "Sadece kök sertifikası alınabilir."
+
+#: nova/api/openstack/compute/contrib/cloudpipe.py:149
+msgid ""
+"Unable to claim IP for VPN instances, ensure it isn't running, and try again"
+" in a few minutes"
+msgstr "VPN örnekleri için IP talep edilemiyor, çalışmadığından emin olun ve birkaç dakika içerisinde tekrar deneyin"
+
+#: nova/api/openstack/compute/contrib/console_output.py:52
+msgid "os-getConsoleOutput malformed or missing from request body"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/console_output.py:59
+msgid "Length in request body must be an integer value"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/console_output.py:67
+msgid "Unable to get console"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/disk_config.py:43
+#, python-format
+msgid "%s must be either 'MANUAL' or 'AUTO'."
+msgstr "%s 'MANUAL' veya 'AUTO' olmak zorunda"
+
+#: nova/api/openstack/compute/contrib/flavor_access.py:80
+#: nova/api/openstack/compute/contrib/flavor_access.py:104
+msgid "Flavor not found."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/flavor_access.py:109
+msgid "Access list not available for public flavors."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/flavor_access.py:121
+msgid "No request body"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/flavorextraspecs.py:49
+#: nova/api/openstack/compute/contrib/volumetypes.py:147
+msgid "No Request Body"
+msgstr "İstek gövdesi yok"
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:97
+msgid "Floating ip is not associated"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:151
+#: nova/api/openstack/compute/contrib/floating_ips.py:199
+#, python-format
+msgid "Floating ip not found for id %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:184
+#, python-format
+msgid "No more floating ips in pool %s."
+msgstr "%s havuzunda daha fazla değişken IP yok"
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:186
+msgid "No more floating ips available."
+msgstr "Daha fazla değişken IP mevcut değil"
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:234
+#: nova/api/openstack/compute/contrib/floating_ips.py:290
+#: nova/api/openstack/compute/contrib/security_groups.py:417
+msgid "Missing parameter dict"
+msgstr "Parametre dizini eksik"
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:237
+#: nova/api/openstack/compute/contrib/floating_ips.py:293
+msgid "Address not specified"
+msgstr "Adres belirtilmedi"
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:244
+msgid "No nw_info cache associated with instance"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:249
+msgid "No fixed ips associated to instance"
+msgstr "Örnekle değişken IP ilişkilendirilmedi"
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:265
+msgid "floating ip is already associated"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:268
+msgid "l3driver call to add floating ip failed"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:272
+#: nova/api/openstack/compute/contrib/floating_ips.py:301
+msgid "floating ip not found"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:275
+msgid "Error. Unable to associate floating ip"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:119
+#, python-format
+msgid "Host '%s' could not be found."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:148
+#, python-format
+msgid "Invalid status: '%s'"
+msgstr "Geçersiz durum: %s"
+
+#: nova/api/openstack/compute/contrib/hosts.py:152
+#, python-format
+msgid "Invalid mode: '%s'"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:156
+#, python-format
+msgid "Invalid update setting: '%s'"
+msgstr "Geçersiz güncelleme ayarı: '%s'"
+
+#: nova/api/openstack/compute/contrib/hosts.py:174
+#, python-format
+msgid "Putting host %(host)s in maintenance mode %(mode)s."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:185
+#, python-format
+msgid "Setting host %(host)s to %(state)s."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:232
+msgid "Describe-resource is admin only functionality"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hosts.py:240
+msgid "Host not found"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hypervisors.py:184
+#: nova/api/openstack/compute/contrib/hypervisors.py:195
+#, python-format
+msgid "Hypervisor with ID '%s' could not be found."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hypervisors.py:203
+msgid "Virt driver does not implement uptime function."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/hypervisors.py:218
+#: nova/api/openstack/compute/contrib/hypervisors.py:232
+#, python-format
+msgid "No hypervisor matching '%s' could be found."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/instance_usage_audit_log.py:54
+#, python-format
+msgid "Invalid timestamp for date %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/multinic.py:52
+msgid "Missing 'networkId' argument for addFixedIp"
+msgstr "addFixedlp için 'networkld' argümanı bulunamadı"
+
+#: nova/api/openstack/compute/contrib/multinic.py:68
+msgid "Missing 'address' argument for removeFixedIp"
+msgstr "removeFixedIp için 'address' argümanı bulunamadı"
+
+#: nova/api/openstack/compute/contrib/multinic.py:77
+#, python-format
+msgid "Unable to find address %r"
+msgstr "%r adresini bulmak olanaksız"
+
+#: nova/api/openstack/compute/contrib/networks.py:72
+#, python-format
+msgid "Network does not have %s action"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/networks.py:80
+#, python-format
+msgid "Disassociating network with id %s"
+msgstr "%s id'si ile ağ ayrılıyor"
+
+#: nova/api/openstack/compute/contrib/networks.py:84
+#: nova/api/openstack/compute/contrib/networks.py:101
+#: nova/api/openstack/compute/contrib/networks.py:111
+msgid "Network not found"
+msgstr "Ağ bulunamadı"
+
+#: nova/api/openstack/compute/contrib/networks.py:97
+#, python-format
+msgid "Showing network with id %s"
+msgstr "%s id'li ağ gösteriliyor"
+
+#: nova/api/openstack/compute/contrib/networks.py:107
+#, python-format
+msgid "Deleting network with id %s"
+msgstr "%s id'li ağ siliniyor"
+
+#: nova/api/openstack/compute/contrib/networks.py:122
+msgid "Missing network in body"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/networks.py:126
+msgid "Network label is required"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/networks.py:130
+msgid "Network cidr or cidr_v6 is required"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/networks.py:132
+#, python-format
+msgid "Creating network with label %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/networks.py:148
+#, python-format
+msgid "Associating network %(network)s with project %(project)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/networks.py:156
+#, python-format
+msgid ""
+"Cannot associate network %(network)s with project %(project)s: %(message)s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/quotas.py:63
+msgid "Quota limit must be -1 or greater."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/scheduler_hints.py:41
+msgid "Malformed scheduler_hints attribute"
+msgstr "Bozuk scheduler_hints özelliği"
+
+#: nova/api/openstack/compute/contrib/security_groups.py:224
+msgid "Security group id should be integer"
+msgstr "Güvenlik grup id'si integer olmak zorunda"
+
+#: nova/api/openstack/compute/contrib/security_groups.py:333
+msgid "Not enough parameters to build a valid rule."
+msgstr "Geçerli bir kuralı oluşturmak için yeterli parametre yok."
+
+#: nova/api/openstack/compute/contrib/security_groups.py:339
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr "Bu kural zaten grupta var %s"
+
+#: nova/api/openstack/compute/contrib/security_groups.py:420
+msgid "Security group not specified"
+msgstr "Güvenlik grubu belirlenmedi"
+
+#: nova/api/openstack/compute/contrib/security_groups.py:424
+msgid "Security group name cannot be empty"
+msgstr "Güvenlik grup adı boş bırakılamaz"
+
+#: nova/api/openstack/compute/contrib/server_start_stop.py:46
+msgid "start instance"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/server_start_stop.py:55
+msgid "stop instance"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/volumes.py:76
+#: nova/api/openstack/volume/volumes.py:105
+#, python-format
+msgid "vol=%s"
+msgstr "birim=%s"
+
+#: nova/api/openstack/compute/contrib/volumes.py:188
+#: nova/api/openstack/volume/volumes.py:223
+#, python-format
+msgid "Delete volume with id: %s"
+msgstr "%s id'li birim siliniyor"
+
+#: nova/api/openstack/compute/contrib/volumes.py:359
+#: nova/api/openstack/compute/contrib/volumes.py:439
+#, python-format
+msgid "Instance %s is not attached."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/volumes.py:390
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
+msgstr "%(device)s'daki %(server_id)s örneğine %(volume_id)s birimi ekleniyor"
+
+#: nova/api/openstack/compute/contrib/volumes.py:553
+#: nova/api/openstack/volume/snapshots.py:112
+#, python-format
+msgid "Delete snapshot with id: %s"
+msgstr "%s id'li sistem görüntüsü siliniyor"
+
+#: nova/api/openstack/compute/contrib/volumes.py:596
+#: nova/api/openstack/volume/snapshots.py:161
+#, python-format
+msgid "Create snapshot from volume %s"
+msgstr "%s biriminden sistem görüntüsü oluşturuluyor"
+
+#: nova/api/openstack/compute/contrib/volumes.py:600
+#: nova/api/openstack/volume/snapshots.py:165
+#, python-format
+msgid "Invalid value '%s' for force. "
+msgstr ""
+
+#: nova/api/openstack/compute/views/servers.py:182
+msgid "Instance has had its instance_type removed from the DB"
+msgstr ""
+
+#: nova/api/openstack/volume/volumes.py:277
+msgid "Invalid request body. 'volume' not found"
+msgstr ""
+
+#: nova/api/openstack/volume/volumes.py:307
+msgid "Invalid request body. 'size' not found"
+msgstr ""
+
+#: nova/api/openstack/volume/volumes.py:317
+msgid "Snapshot and image cannot be specified together."
+msgstr ""
+
+#: nova/api/openstack/volume/volumes.py:361
+#, python-format
+msgid "Removing options '%(bad_options)s' from query"
+msgstr ""
+
+#: nova/api/openstack/volume/contrib/admin_actions.py:72
+#, python-format
+msgid "Updating status of %(resource)s '%(id)s' to '%(status)s'"
+msgstr ""
+
+#: nova/api/openstack/volume/contrib/volume_actions.py:90
+msgid "No image_name was specified in request."
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:44
+msgid "Instance type for vpn instances"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:47
+msgid "Template for cloudpipe instance boot script"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:50
+msgid "Network to push into openvpn config"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:53
+msgid "Netmask to push into openvpn config"
+msgstr ""
+
+#: nova/cloudpipe/pipelib.py:106
+#, python-format
+msgid "Launching VPN for %s"
+msgstr ""
+
+#: nova/common/deprecated.py:53
+#, python-format
+msgid "Deprecated Config: %s"
+msgstr ""
+
+#: nova/common/sqlalchemyutils.py:68
+msgid "Id not in sort_keys; is sort_keys unique?"
+msgstr ""
+
+#: nova/common/sqlalchemyutils.py:116
+msgid "Unknown sort direction, must be 'desc' or 'asc'"
+msgstr ""
+
+#: nova/compute/api.py:220
+msgid "Cannot run any more instances of this type."
+msgstr ""
+
+#: nova/compute/api.py:227
+#, python-format
+msgid "Can only run %s more instances of this type."
+msgstr ""
+
+#: nova/compute/api.py:236
+#, python-format
+msgid ""
+"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)s instances. "
+"%(msg)s"
+msgstr ""
+
+#: nova/compute/api.py:256
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
+"properties"
+msgstr ""
+
+#: nova/compute/api.py:266
+msgid "Metadata property key blank"
+msgstr ""
+
+#: nova/compute/api.py:270
+msgid "Metadata property key greater than 255 characters"
+msgstr ""
+
+#: nova/compute/api.py:274
+msgid "Metadata property value greater than 255 characters"
+msgstr ""
+
+#: nova/compute/api.py:498
+#, python-format
+msgid "Going to run %s instances..."
+msgstr ""
+
+#: nova/compute/api.py:569
+#, python-format
+msgid "bdm %s"
+msgstr ""
+
+#: nova/compute/api.py:596
+#, python-format
+msgid "block_device_mapping %s"
+msgstr ""
+
+#: nova/compute/api.py:828
+msgid "Going to try to soft delete instance"
+msgstr ""
+
+#: nova/compute/api.py:845
+msgid "No host for instance, deleting immediately"
+msgstr ""
+
+#: nova/compute/api.py:929
+msgid "host for instance is down, deleting from database"
+msgstr ""
+
+#: nova/compute/api.py:950
+msgid "Going to try to terminate instance"
+msgstr ""
+
+#: nova/compute/api.py:990
+msgid "Going to try to stop instance"
+msgstr ""
+
+#: nova/compute/api.py:1004
+msgid "Going to try to start instance"
+msgstr ""
+
+#: nova/compute/api.py:1068 nova/volume/api.py:264
+#: nova/volume/volume_types.py:64
+#, python-format
+msgid "Searching by: %s"
+msgstr ""
+
+#: nova/compute/api.py:1203
+#, python-format
+msgid "Image type not recognized %s"
+msgstr ""
+
+#: nova/compute/api.py:1312
+#, python-format
+msgid "snapshot for %s"
+msgstr ""
+
+#: nova/compute/api.py:1634
+msgid "flavor_id is None. Assuming migration."
+msgstr ""
+
+#: nova/compute/api.py:1643
+#, python-format
+msgid ""
+"Old instance type %(current_instance_type_name)s, new instance type "
+"%(new_instance_type_name)s"
+msgstr "Eski örnek türü %(current_instance_type_name)s, yeni örnek türü %(new_instance_type_name)s"
+
+#: nova/compute/api.py:1685
+#, python-format
+msgid ""
+"%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s"
+msgstr ""
+
+#: nova/compute/api.py:1857
+msgid "Locking"
+msgstr ""
+
+#: nova/compute/api.py:1865
+msgid "Unlocking"
+msgstr ""
+
+#: nova/compute/api.py:1933
+msgid "Volume must be attached in order to detach."
+msgstr ""
+
+#: nova/compute/api.py:2018
+#, python-format
+msgid "Going to try to live migrate instance to %s"
+msgstr ""
+
+#: nova/compute/api.py:2167
+msgid "Keypair name contains unsafe characters"
+msgstr ""
+
+#: nova/compute/api.py:2171
+msgid "Keypair name must be between 1 and 255 characters long"
+msgstr "Anahtar çifti adı 1-255 karakter arası uzunluğunda olmak zorunda"
+
+#: nova/compute/api.py:2272
+#, python-format
+msgid "Security group %s is not a string or unicode"
+msgstr "%s güvenlik grubu string veya unicode değil"
+
+#: nova/compute/api.py:2275
+#, python-format
+msgid "Security group %s cannot be empty."
+msgstr "%s güvenlik grubu boş bırakılamaz."
+
+#: nova/compute/api.py:2283
+#, python-format
+msgid ""
+"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
+"limited to '%(allowed)'."
+msgstr ""
+
+#: nova/compute/api.py:2289
+#, python-format
+msgid "Security group %s should not be greater than 255 characters."
+msgstr "%s güvenlik grubu 255 karakterden fazla olmamalıdır."
+
+#: nova/compute/api.py:2309
+msgid "Quota exceeded, too many security groups."
+msgstr ""
+
+#: nova/compute/api.py:2312
+#, python-format
+msgid "Create Security Group %s"
+msgstr "%s güvenlik grubunu oluşturunuz"
+
+#: nova/compute/api.py:2319
+#, python-format
+msgid "Security group %s already exists"
+msgstr "%s güvenlik grubu zaten var"
+
+#: nova/compute/api.py:2384
+msgid "Security group is still in use"
+msgstr ""
+
+#: nova/compute/api.py:2392
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+
+#: nova/compute/api.py:2395
+#, python-format
+msgid "Delete security group %s"
+msgstr "%s güvenlik grubunu siliniz"
+
+#: nova/compute/api.py:2652
+#, python-format
+msgid "Rule (%s) not found"
+msgstr "Kural (%s) bulunamadı"
+
+#: nova/compute/api.py:2661
+msgid "Quota exceeded, too many security group rules."
+msgstr ""
+
+#: nova/compute/api.py:2664
+#, python-format
+msgid "Authorize security group ingress %s"
+msgstr "Yetkilendirilmiş %s güvenlik grubu girişi"
+
+#: nova/compute/api.py:2675
+#, python-format
+msgid "Revoke security group ingress %s"
+msgstr "%s güvenlik grubu girişini iptal et"
+
+#: nova/compute/instance_types.py:63
+msgid "names can only contain [a-zA-Z0-9_.- ]"
+msgstr ""
+
+#: nova/compute/instance_types.py:72 nova/compute/instance_types.py:80
+msgid "create arguments must be positive integers"
+msgstr ""
+
+#: nova/compute/instance_types.py:94 nova/volume/volume_types.py:41
+#, python-format
+msgid "DB error: %s"
+msgstr "Veritabanı hatası: %s"
+
+#: nova/compute/instance_types.py:104
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr "Silme işlemi için %s örnek türü bulunamadı"
+
+#: nova/compute/manager.py:167
+msgid "Possibly task preempted."
+msgstr ""
+
+#: nova/compute/manager.py:230
+msgid "Compute driver option required, but not specified"
+msgstr ""
+
+#: nova/compute/manager.py:233
+#, python-format
+msgid "Loading compute driver '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:239
+#, python-format
+msgid "Unable to load the virtualization driver: %s"
+msgstr "Sanallaştırma sürücüsü yükleme başarısız: %s"
+
+#: nova/compute/manager.py:274
+msgid ""
+"Instance has been destroyed from under us while trying to set it to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:302
+#, python-format
+msgid "Current state is %(drv_state)s, state in DB is %(db_state)s."
+msgstr "Mevcut durum %(drv_state)s, veritabanındaki durum %(db_state)s."
+
+#: nova/compute/manager.py:316
+msgid "Rebooting instance after nova-compute restart."
+msgstr ""
+
+#: nova/compute/manager.py:330
+msgid "Hypervisor driver does not support resume guests"
+msgstr ""
+
+#: nova/compute/manager.py:340
+msgid "Hypervisor driver does not support firewall rules"
+msgstr "Misafir sistem sürücüsü güvenlik duvarı kurallarını desteklemiyor"
+
+#: nova/compute/manager.py:352
+msgid "Checking state"
+msgstr "Durum kontrol ediliyor"
+
+#: nova/compute/manager.py:425
+#, python-format
+msgid "Setting up bdm %s"
+msgstr "%s blok aygıt haritalandırması ayarlanıyor"
+
+#: nova/compute/manager.py:518
+msgid "Failed to dealloc network for deleted instance"
+msgstr ""
+
+#: nova/compute/manager.py:550
+#, python-format
+msgid "Build error: %s"
+msgstr ""
+
+#: nova/compute/manager.py:567
+msgid "Error trying to reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:584
+msgid "Retry info not present, will not reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:589
+msgid "No request spec, will not reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:595
+#, python-format
+msgid "Re-scheduling instance: attempt %d"
+msgstr ""
+
+#: nova/compute/manager.py:620
+msgid "Instance build timed out. Set to error state."
+msgstr ""
+
+#: nova/compute/manager.py:651
+msgid "Instance has already been created"
+msgstr ""
+
+#: nova/compute/manager.py:694
+#, python-format
+msgid ""
+"image_id=%(image_id)s, image_size_bytes=%(size_bytes)d, "
+"allowed_size_bytes=%(allowed_size_bytes)d"
+msgstr "İmaj id=%(image_id)s, İmaj boyutu=%(size_bytes)d, İzin verilen boyut=%(allowed_size_bytes)d"
+
+#: nova/compute/manager.py:700
+#, python-format
+msgid ""
+"Image '%(image_id)s' size %(size_bytes)d exceeded instance_type allowed size"
+" %(allowed_size_bytes)d"
+msgstr "İmaj '%(image_id)s' boyut%(size_bytes)d izin verilen örnek %(allowed_size_bytes)d"
+
+#: nova/compute/manager.py:710
+msgid "Starting instance..."
+msgstr "Örnek başlatılıyor.."
+
+#: nova/compute/manager.py:731
+msgid "Instance failed network setup"
+msgstr "Ağ kurulumundaki örnek başarısız"
+
+#: nova/compute/manager.py:735
+#, python-format
+msgid "Instance network_info: |%s|"
+msgstr "Örnek ağ_bilgisi: |%s|"
+
+#: nova/compute/manager.py:748
+msgid "Instance failed block device setup"
+msgstr "Örnek blok aygıtı kurulumu hatalı"
+
+#: nova/compute/manager.py:766
+msgid "Instance failed to spawn"
+msgstr "Örnek oluşturmada hata"
+
+#: nova/compute/manager.py:790
+msgid "Deallocating network for instance"
+msgstr "Örnek için ağ serbest bırakılıyor"
+
+#: nova/compute/manager.py:862
+#, python-format
+msgid "%(action_str)s instance"
+msgstr "%(action_str)s örneği"
+
+#: nova/compute/manager.py:893
+#, python-format
+msgid "Ignoring DiskNotFound: %s"
+msgstr "DiskBulunamadı yok sayılıyor: %s"
+
+#: nova/compute/manager.py:896
+#, python-format
+msgid "Ignoring VolumeNotFound: %s"
+msgstr ""
+
+#: nova/compute/manager.py:903
+#, python-format
+msgid "terminating bdm %s"
+msgstr "Blok cihazı haritalandırması kapatılıyor %s"
+
+#: nova/compute/manager.py:927
+#, python-format
+msgid "Ignoring volume cleanup failure due to %s"
+msgstr ""
+
+#: nova/compute/manager.py:967 nova/compute/manager.py:1808
+#: nova/compute/manager.py:2993
+#, python-format
+msgid "%s. Setting instance vm_state to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:1049
+msgid "Rebuilding instance"
+msgstr ""
+
+#: nova/compute/manager.py:1128
+msgid "Rebooting instance"
+msgstr ""
+
+#: nova/compute/manager.py:1152
+#, python-format
+msgid ""
+"trying to reboot a non-running instance: (state: %(state)s expected: "
+"%(running)s)"
+msgstr ""
+
+#: nova/compute/manager.py:1161
+#, python-format
+msgid "Cannot reboot instance: %(exc)s"
+msgstr ""
+
+#: nova/compute/manager.py:1198
+msgid "instance snapshotting"
+msgstr ""
+
+#: nova/compute/manager.py:1204
+#, python-format
+msgid ""
+"trying to snapshot a non-running instance: (state: %(state)s expected: "
+"%(running)s)"
+msgstr ""
+
+#: nova/compute/manager.py:1274
+#, python-format
+msgid "Found %(num_images)d images (rotation: %(rotation)d)"
+msgstr "%(num_images)d tane imaj bulundu(dönüş: %(rotation)d)"
+
+#: nova/compute/manager.py:1280
+#, python-format
+msgid "Rotating out %d backups"
+msgstr ""
+
+#: nova/compute/manager.py:1285
+#, python-format
+msgid "Deleting image %s"
+msgstr "İmaj siliniyor %s"
+
+#: nova/compute/manager.py:1316
+#, python-format
+msgid "Failed to set admin password. Instance %s is not running"
+msgstr "Yönetici parolası oluşturmada hata. %s örneği çalışmıyor."
+
+#: nova/compute/manager.py:1323
+msgid "Root password set"
+msgstr ""
+
+#: nova/compute/manager.py:1333
+msgid "set_admin_password is not implemented by this driver."
+msgstr ""
+
+#: nova/compute/manager.py:1349
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr ""
+
+#: nova/compute/manager.py:1357
+msgid "error setting admin password"
+msgstr ""
+
+#: nova/compute/manager.py:1372
+#, python-format
+msgid ""
+"trying to inject a file into a non-running (state: %(current_power_state)s "
+"expected: %(expected_state)s)"
+msgstr ""
+
+#: nova/compute/manager.py:1376
+#, python-format
+msgid "injecting file to %(path)s"
+msgstr ""
+
+#: nova/compute/manager.py:1389
+msgid "Rescuing"
+msgstr ""
+
+#: nova/compute/manager.py:1416
+msgid "Unrescuing"
+msgstr ""
+
+#: nova/compute/manager.py:1437
+#, python-format
+msgid "Changing instance metadata according to %(diff)r"
+msgstr ""
+
+#: nova/compute/manager.py:1619
+msgid "destination same as source!"
+msgstr "dedef kaynak ile aynı!"
+
+#: nova/compute/manager.py:1637
+msgid "Migrating"
+msgstr ""
+
+#: nova/compute/manager.py:1805
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %(qr_error)s"
+msgstr ""
+
+#: nova/compute/manager.py:1861
+msgid "Pausing"
+msgstr ""
+
+#: nova/compute/manager.py:1878
+msgid "Unpausing"
+msgstr ""
+
+#: nova/compute/manager.py:1916
+msgid "Retrieving diagnostics"
+msgstr ""
+
+#: nova/compute/manager.py:1946
+msgid "Resuming"
+msgstr ""
+
+#: nova/compute/manager.py:1962
+msgid "Reset network"
+msgstr ""
+
+#: nova/compute/manager.py:1967
+msgid "Inject network info"
+msgstr ""
+
+#: nova/compute/manager.py:1970
+#, python-format
+msgid "network_info to inject: |%s|"
+msgstr ""
+
+#: nova/compute/manager.py:1987
+msgid "Get console output"
+msgstr ""
+
+#: nova/compute/manager.py:2012
+msgid "Getting vnc console"
+msgstr ""
+
+#: nova/compute/manager.py:2040
+#, python-format
+msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
+msgstr "%(mountpoint)s'de %(volume_id)s bölümü ön yükleniyor"
+
+#: nova/compute/manager.py:2083
+#, python-format
+msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
+msgstr "%(mountpoint)s'e %(volume_id)s bölümü bağlanıyor"
+
+#: nova/compute/manager.py:2092
+#, python-format
+msgid ""
+"Failed to connect to volume %(volume_id)s while attaching at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:2107
+#, python-format
+msgid "Failed to attach volume %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:2136
+#, python-format
+msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
+msgstr "%(mp)s bağlama noktasındaki %(volume_id)s bölümü ayrılıyor"
+
+#: nova/compute/manager.py:2140
+msgid "Detaching volume from unknown instance"
+msgstr ""
+
+#: nova/compute/manager.py:2153
+#, python-format
+msgid "Faild to detach volume %(volume_id)s from %(mp)s"
+msgstr ""
+
+#: nova/compute/manager.py:2246
+msgid "Instance has no volume."
+msgstr "Örneğin hiç bölümü yok."
+
+#: nova/compute/manager.py:2306
+#, python-format
+msgid "Pre live migration failed at %(dest)s"
+msgstr "Güncel göç işlemi %(dest)s'da bir hata ile karşılaştı"
+
+#: nova/compute/manager.py:2332
+msgid "_post_live_migration() is started.."
+msgstr ""
+
+#: nova/compute/manager.py:2365
+msgid "No floating_ip found"
+msgstr ""
+
+#: nova/compute/manager.py:2373
+msgid "No floating_ip found."
+msgstr ""
+
+#: nova/compute/manager.py:2375
+#, python-format
+msgid ""
+"Live migration: Unexpected error: cannot inherit floating ip.\n"
+"%(e)s"
+msgstr ""
+
+#: nova/compute/manager.py:2401
+#, python-format
+msgid "Migrating instance to %(dest)s finished successfully."
+msgstr "%(dest)s'a örnek göçü işlemi başarıyla tamamlandı."
+
+#: nova/compute/manager.py:2403
+msgid ""
+"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
+"with matching name.\" This error can be safely ignored."
+msgstr ""
+
+#: nova/compute/manager.py:2417
+msgid "Post operation of migration started"
+msgstr ""
+
+#: nova/compute/manager.py:2548
+msgid "Updated the info_cache for instance"
+msgstr ""
+
+#: nova/compute/manager.py:2574
+#, python-format
+msgid ""
+"Found %(migration_count)d unconfirmed migrations older than "
+"%(confirm_window)d seconds"
+msgstr ""
+
+#: nova/compute/manager.py:2579
+#, python-format
+msgid "Setting migration %(migration_id)s to error: %(reason)s"
+msgstr ""
+
+#: nova/compute/manager.py:2588
+#, python-format
+msgid ""
+"Automatically confirming migration %(migration_id)s for instance "
+"%(instance_uuid)s"
+msgstr ""
+
+#: nova/compute/manager.py:2595
+#, python-format
+msgid "Instance %(instance_uuid)s not found"
+msgstr ""
+
+#: nova/compute/manager.py:2599
+msgid "In ERROR state"
+msgstr ""
+
+#: nova/compute/manager.py:2606
+#, python-format
+msgid "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
+msgstr ""
+
+#: nova/compute/manager.py:2614
+#, python-format
+msgid "Error auto-confirming resize: %(e)s. Will retry later."
+msgstr ""
+
+#: nova/compute/manager.py:2631
+#, python-format
+msgid ""
+"Running instance usage audit for host %(host)s from %(begin_time)s to "
+"%(end_time)s. %(number_instances)s instances."
+msgstr ""
+
+#: nova/compute/manager.py:2649
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+
+#: nova/compute/manager.py:2672
+msgid "Updating bandwidth usage cache"
+msgstr "Bant genişliği kullanım önbelleği güncelleniyor"
+
+#: nova/compute/manager.py:2737
+msgid "Updating host status"
+msgstr "Sunucu durumu güncelleniyor"
+
+#: nova/compute/manager.py:2763
+#, python-format
+msgid ""
+"Found %(num_db_instances)s in the database and %(num_vm_instances)s on the "
+"hypervisor."
+msgstr "Veritabanında %(num_db_instances)s ve misafir sistemde %(num_vm_instances)s bulundu"
+
+#: nova/compute/manager.py:2769 nova/compute/manager.py:2807
+msgid "During sync_power_state the instance has a pending task. Skip."
+msgstr ""
+
+#: nova/compute/manager.py:2794
+#, python-format
+msgid ""
+"During the sync_power process the instance has moved from host %(src)s to "
+"host %(dst)s"
+msgstr ""
+
+#: nova/compute/manager.py:2831
+msgid "Instance shutdown by itself. Calling the stop API."
+msgstr ""
+
+#: nova/compute/manager.py:2843 nova/compute/manager.py:2854
+#: nova/compute/manager.py:2868
+msgid "error during stop() in sync_power_state."
+msgstr ""
+
+#: nova/compute/manager.py:2848
+msgid "Instance is paused or suspended unexpectedly. Calling the stop API."
+msgstr ""
+
+#: nova/compute/manager.py:2861
+msgid "Instance is not stopped. Calling the stop API."
+msgstr ""
+
+#: nova/compute/manager.py:2877
+msgid "Instance is not (soft-)deleted."
+msgstr ""
+
+#: nova/compute/manager.py:2885
+msgid "FLAGS.reclaim_instance_interval <= 0, skipping..."
+msgstr ""
+
+#: nova/compute/manager.py:2898
+msgid "Reclaiming deleted instance"
+msgstr "Silinen örnek kurtarılıyor"
+
+#: nova/compute/manager.py:2947
+#, python-format
+msgid ""
+"Detected instance with name label '%(name)s' which is marked as DELETED but "
+"still present on host."
+msgstr ""
+
+#: nova/compute/manager.py:2954
+#, python-format
+msgid ""
+"Destroying instance with name label '%(name)s' which is marked as DELETED "
+"but still present on host."
+msgstr ""
+
+#: nova/compute/manager.py:2961
+#, python-format
+msgid ""
+"Unrecognized value '%(action)s' for FLAGS.running_deleted_instance_action"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:151
+msgid ""
+"Host field should be not be set on the instance until resources have been "
+"claimed."
+msgstr ""
+
+#: nova/compute/resource_tracker.py:168
+#, python-format
+msgid ""
+"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
+"%(vcpus)d"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:210
+#, python-format
+msgid ""
+"Total memory: %(total_mem)d MB, used: %(used_mem)d MB, free: %(free_mem)d MB"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:219
+msgid "Memory limit not specified, defaulting to unlimited"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:225
+#, python-format
+msgid "Memory limit: %(memory_mb_limit)d MB, free: %(free_ram_mb)d MB"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:232
+#, python-format
+msgid ""
+"Unable to claim resources. Free memory %(free_ram_mb)d MB < requested "
+"memory %(memory_mb)d MB"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:241
+#, python-format
+msgid ""
+"Total disk: %(total_disk)d GB, used: %(used_disk)d GB, free: %(free_disk)d "
+"GB"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:250
+msgid "Disk limit not specified, defaulting to unlimited"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:256
+#, python-format
+msgid "Disk limit: %(disk_gb_limit)d GB, free: %(free_disk_gb)d GB"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:262
+#, python-format
+msgid ""
+"Unable to claim resources. Free disk %(free_disk_gb)d GB < requested disk "
+"%(disk_gb)d GB"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:273
+#, python-format
+msgid "Total VCPUs: %(total_vcpus)d, used: %(used_vcpus)d"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:280
+msgid "VCPU limit not specified, defaulting to unlimited"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:284
+#, python-format
+msgid "CPU limit: %(vcpu_limit)d"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:291
+#, python-format
+msgid ""
+"Unable to claim resources. Free CPU %(free_vcpus)d < requested CPU "
+"%(vcpus)d"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:310
+#, python-format
+msgid "Finishing claim: %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:325
+#, python-format
+msgid "Aborting claim: %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:363
+msgid ""
+"Virt driver does not support 'get_available_resource' Compute tracking is "
+"disabled."
+msgstr ""
+
+#: nova/compute/resource_tracker.py:401
+#, python-format
+msgid "Compute_service record created for %s "
+msgstr ""
+
+#: nova/compute/resource_tracker.py:406
+#, python-format
+msgid "Compute_service record updated for %s "
+msgstr ""
+
+#: nova/compute/resource_tracker.py:425
+#, python-format
+msgid "No service record for host %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:435
+#, python-format
+msgid "Hypervisor: free ram (MB): %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:436
+#, python-format
+msgid "Hypervisor: free disk (GB): %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:441
+#, python-format
+msgid "Hypervisor: free VCPUs: %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:443
+msgid "Hypervisor: VCPU information unavailable"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:450
+#, python-format
+msgid "Free ram (MB): %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:451
+#, python-format
+msgid "Free disk (GB): %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:456
+#, python-format
+msgid "Free VCPUS: %s"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:458
+msgid "Free VCPU information unavailable"
+msgstr ""
+
+#: nova/compute/resource_tracker.py:533
+#, python-format
+msgid "Missing keys: %s"
+msgstr ""
+
+#: nova/compute/rpcapi.py:44
+msgid "No compute host specified"
+msgstr ""
+
+#: nova/compute/rpcapi.py:47
+#, python-format
+msgid "Unable to find host for Instance %s"
+msgstr "%s örneği için sunucu bulma başarısız"
+
+#: nova/compute/utils.py:87
+#, python-format
+msgid "Using %(prefix)s instead of %(req_prefix)s"
+msgstr ""
+
+#: nova/console/manager.py:81 nova/console/vmrc_manager.py:71
+msgid "Adding console"
+msgstr "Konsol ekleniyor"
+
+#: nova/console/manager.py:102 nova/console/vmrc_manager.py:123
+#, python-format
+msgid "Tried to remove non-existent console %(console_id)s."
+msgstr "Var olmayan %(console_id)s konsolu kaldırılmaya çalışıldı."
+
+#: nova/console/vmrc_manager.py:126
+#, python-format
+msgid "Removing console %(console_id)s."
+msgstr "%(console_id)s konsolu kaldırılıyor."
+
+#: nova/console/xvp.py:98
+msgid "Rebuilding xvp conf"
+msgstr "XVP ayarı yeniden oluşturuluyor"
+
+#: nova/console/xvp.py:116
+#, python-format
+msgid "Re-wrote %s"
+msgstr "%s yeniden yazıldı"
+
+#: nova/console/xvp.py:121
+msgid "Stopping xvp"
+msgstr "XVP durduruluyor"
+
+#: nova/console/xvp.py:134
+msgid "Starting xvp"
+msgstr "XVP başlatılıyor"
+
+#: nova/console/xvp.py:141
+#, python-format
+msgid "Error starting xvp: %s"
+msgstr "XVP başlatılırken hata oluştu: %s"
+
+#: nova/console/xvp.py:144
+msgid "Restarting xvp"
+msgstr "XVP tekar başlatılıyor"
+
+#: nova/console/xvp.py:146
+msgid "xvp not running..."
+msgstr "XVP çalışmıyor..."
+
+#: nova/consoleauth/manager.py:70
+#, python-format
+msgid "Received Token: %(token)s, %(token_dict)s)"
+msgstr "Token alındı: %(token)s, %(token_dict)s)"
+
+#: nova/consoleauth/manager.py:75
+#, python-format
+msgid "Checking Token: %(token)s, %(token_valid)s)"
+msgstr "Anahtar kontrol edliyor: %(token)s, %(token_valid)s)"
+
+#: nova/db/sqlalchemy/api.py:206
+#, python-format
+msgid "Unrecognized read_deleted value '%s'"
+msgstr "Tanınmayan silinmiş okuma değeri '%s'"
+
+#: nova/db/sqlalchemy/api.py:2790
+#, python-format
+msgid ""
+"Change will make usage less than 0 for the following resources: %(unders)s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:4691
+msgid "Backend exists"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:4711 nova/db/sqlalchemy/api.py:4737
+#, python-format
+msgid "No backend config with id %(sm_backend_id)s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:4749
+#, python-format
+msgid "No backend config with sr uuid %(sr_uuid)s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:4783
+msgid "Flavor exists"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:4798
+#, python-format
+msgid "%(sm_flavor_id) flavor not found"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:4817
+#, python-format
+msgid "No sm_flavor called %(sm_flavor_id)s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:4834
+#, python-format
+msgid "No sm_flavor called %(sm_flavor_label)s"
+msgstr ""
+
+#: nova/db/sqlalchemy/api.py:4872
+#, python-format
+msgid "No sm_volume with id %(volume_id)s"
+msgstr ""
+
+#: nova/db/sqlalchemy/migration.py:74
+msgid "version should be an integer"
+msgstr "Sürüm tam sayı olmak zorunda"
+
+#: nova/db/sqlalchemy/migration.py:101
+msgid "Upgrade DB using Essex release first."
+msgstr ""
+
+#: nova/db/sqlalchemy/session.py:162
+#, python-format
+msgid "SQL connection failed. %s attempts left."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:50
+#: nova/db/sqlalchemy/migrate_repo/versions/092_add_instance_system_metadata.py:60
+#: nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py:53
+#: nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py:86
+#: nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py:74
+#: nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py:97
+#: nova/db/sqlalchemy/migrate_repo/versions/132_add_instance_type_projects.py:52
+#, python-format
+msgid "Table |%s| not created!"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/083_quota_class.py:62
+msgid "quota_classes table not dropped"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/088_change_instance_id_to_uuid_in_block_device_mapping.py:56
+#: nova/db/sqlalchemy/migrate_repo/versions/100_instance_metadata_uses_uuid.py:56
+#: nova/db/sqlalchemy/migrate_repo/versions/101_security_group_instance_association_uses_uuid.py:56
+#: nova/db/sqlalchemy/migrate_repo/versions/102_consoles_uses_uuid.py:56
+#: nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py:44
+#: nova/db/sqlalchemy/migrate_repo/versions/105_instance_info_caches_uses_uuid.py:67
+#: nova/db/sqlalchemy/migrate_repo/versions/109_drop_dns_domains_project_id_fkey.py:43
+#: nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py:56
+#: nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py:87
+#: nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py:56
+#: nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py:87
+msgid "foreign key constraint couldn't be removed"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py:84
+#: nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py:128
+#: nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py:178
+#: nova/db/sqlalchemy/migrate_repo/versions/090_modify_volume_id_datatype.py:236
+msgid "Foreign Key constraint couldn't be removed"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py:57
+msgid "foreign key could not be dropped"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/095_change_fk_instance_id_to_uuid.py:91
+msgid "foreign key could not be created"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py:98
+msgid "quota_usages table not dropped"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/097_quota_usages_reservations.py:105
+msgid "reservations table not dropped"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py:45
+#: nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py:66
+#: nova/db/sqlalchemy/migrate_repo/versions/113_fixed_ips_uses_uuid.py:107
+#: nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py:66
+#: nova/db/sqlalchemy/migrate_repo/versions/114_vifs_uses_uuid.py:107
+msgid "foreign key constraint couldn't be created"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/106_add_foreign_keys.py:66
+msgid "foreign key constraint couldn't be dropped"
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/115_make_user_quotas_key_and_value.py:93
+#: nova/db/sqlalchemy/migrate_repo/versions/116_drop_user_quotas_key_and_value.py:43
+msgid "user_quotas table not dropped"
+msgstr ""
+
+#: nova/image/glance.py:143
+#, python-format
+msgid ""
+"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
+"%(extra)s."
+msgstr ""
+
+#: nova/image/s3.py:311
+#, python-format
+msgid "Failed to download %(image_location)s to %(image_path)s"
+msgstr "%(image_path)si için %(image_location)s indirme işlemi başarısız"
+
+#: nova/image/s3.py:328
+#, python-format
+msgid "Failed to decrypt %(image_location)s to %(image_path)s"
+msgstr "%(image_path)s için %(image_location)s çözümü başarısız"
+
+#: nova/image/s3.py:338
+#, python-format
+msgid "Failed to untar %(image_location)s to %(image_path)s"
+msgstr "%(image_path)s için %(image_location)s çıkarma işlemi başarısız"
+
+#: nova/image/s3.py:348
+#, python-format
+msgid "Failed to upload %(image_location)s to %(image_path)s"
+msgstr "%(image_path)s için %(image_location)s yükleme işlemi başarısız"
+
+#: nova/image/s3.py:372
+#, python-format
+msgid "Failed to decrypt private key: %s"
+msgstr "Özel anahtar çözümü başarısız: %s"
+
+#: nova/image/s3.py:379
+#, python-format
+msgid "Failed to decrypt initialization vector: %s"
+msgstr "Başlatma vektörü çözümü başarısız: %s"
+
+#: nova/image/s3.py:390
+#, python-format
+msgid "Failed to decrypt image file %(image_file)s: %(err)s"
+msgstr "%(image_file)s imaj dosyası çözümü başarısız: %(err)s"
+
+#: nova/image/s3.py:402
+msgid "Unsafe filenames in image"
+msgstr "İmajda güvenliksiz dosya isimleri var"
+
+#: nova/ipv6/account_identifier.py:38 nova/ipv6/rfc2462.py:34
+#, python-format
+msgid "Bad mac for to_global_ipv6: %s"
+msgstr "Global ipv6 için yanlış mac numarası: %s"
+
+#: nova/ipv6/account_identifier.py:40 nova/ipv6/rfc2462.py:36
+#, python-format
+msgid "Bad prefix for to_global_ipv6: %s"
+msgstr "Global ipv6 için yanlış sonek: %s"
+
+#: nova/ipv6/account_identifier.py:42
+#, python-format
+msgid "Bad project_id for to_global_ipv6: %s"
+msgstr "Global ipv6 için yanlış proje numarası: %s"
+
+#: nova/network/api.py:53
+msgid "instance is a required argument to use @refresh_cache"
+msgstr ""
+
+#: nova/network/api.py:80
+#, python-format
+msgid "args: %s"
+msgstr ""
+
+#: nova/network/api.py:81
+#, python-format
+msgid "kwargs: %s"
+msgstr ""
+
+#: nova/network/api.py:169
+#, python-format
+msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
+msgstr ""
+
+#: nova/network/ldapdns.py:317
+msgid "This driver only supports type 'a' entries."
+msgstr "Bu türücü sadece 'a' türü girdileri destekliyor."
+
+#: nova/network/linux_net.py:179
+#, python-format
+msgid "Attempted to remove chain %s which does not exist"
+msgstr "Var olmayan %s zinciri kaldırılmaya çalışılıyor"
+
+#: nova/network/linux_net.py:214
+#, python-format
+msgid "Unknown chain: %r"
+msgstr "Tanınmayan zincir: %r"
+
+#: nova/network/linux_net.py:239
+#, python-format
+msgid ""
+"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r %(top)r"
+msgstr ""
+
+#: nova/network/linux_net.py:374
+msgid "IPTablesManager.apply completed with success"
+msgstr ""
+
+#: nova/network/linux_net.py:580
+#, python-format
+msgid "arping error for ip %s"
+msgstr ""
+
+#: nova/network/linux_net.py:790
+#, python-format
+msgid "Pid %d is stale, skip killing dnsmasq"
+msgstr ""
+
+#: nova/network/linux_net.py:830
+#, python-format
+msgid "Hupping dnsmasq threw %s"
+msgstr ""
+
+#: nova/network/linux_net.py:832
+#, python-format
+msgid "Pid %d is stale, relaunching dnsmasq"
+msgstr ""
+
+#: nova/network/linux_net.py:894
+#, python-format
+msgid "killing radvd threw %s"
+msgstr ""
+
+#: nova/network/linux_net.py:896
+#, python-format
+msgid "Pid %d is stale, relaunching radvd"
+msgstr "%d süreç numarası bozuldu, radvd tekrar başlatılıyor"
+
+#: nova/network/linux_net.py:1126
+#, python-format
+msgid "Starting VLAN inteface %s"
+msgstr "%s VLAN arayüzü başlatılıyor "
+
+#: nova/network/linux_net.py:1162
+#, python-format
+msgid "Starting Bridge interface for %s"
+msgstr ""
+
+#: nova/network/linux_net.py:1205
+#, python-format
+msgid "Failed to add interface: %s"
+msgstr ""
+
+#: nova/network/linux_net.py:1306
+#, python-format
+msgid "Starting bridge %s "
+msgstr ""
+
+#: nova/network/linux_net.py:1314
+#, python-format
+msgid "Done starting bridge %s"
+msgstr ""
+
+#: nova/network/linux_net.py:1333
+#, python-format
+msgid "Failed unplugging gateway interface '%s'"
+msgstr ""
+
+#: nova/network/linux_net.py:1335
+#, python-format
+msgid "Unplugged gateway interface '%s'"
+msgstr ""
+
+#: nova/network/manager.py:277
+#, python-format
+msgid "Fixed ip %(fixed_ip_id)s not found"
+msgstr ""
+
+#: nova/network/manager.py:286 nova/network/manager.py:545
+#, python-format
+msgid "Interface %(interface)s not found"
+msgstr "%(interface)s arayüzü bulunamadı"
+
+#: nova/network/manager.py:301
+#, python-format
+msgid "floating IP allocation for instance |%s|"
+msgstr "|%s| örneği için değişken IP tahsisi"
+
+#: nova/network/manager.py:365
+msgid "Floating IP is not associated. Ignore."
+msgstr ""
+
+#: nova/network/manager.py:383
+#, python-format
+msgid "Address |%(address)s| is not allocated"
+msgstr "|%(address)s| adresi tahsis edilemedi"
+
+#: nova/network/manager.py:387
+#, python-format
+msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
+msgstr "Sizin |%(project)s| projeniz için |%(address)s| adresi tahsis edilemedi"
+
+#: nova/network/manager.py:408
+#, python-format
+msgid "Quota exceeded for %(pid)s, tried to allocate floating IP"
+msgstr ""
+
+#: nova/network/manager.py:469
+msgid "Failed to update usages deallocating floating IP"
+msgstr ""
+
+#: nova/network/manager.py:650
+#, python-format
+msgid "Starting migration network for instance %(instance_uuid)s"
+msgstr ""
+
+#: nova/network/manager.py:657
+#, python-format
+msgid ""
+"Floating ip address |%(address)s| no longer belongs to instance "
+"%(instance_uuid)s. Will notmigrate it "
+msgstr ""
+
+#: nova/network/manager.py:678
+#, python-format
+msgid "Finishing migration network for instance %(instance_uuid)s"
+msgstr ""
+
+#: nova/network/manager.py:686
+#, python-format
+msgid ""
+"Floating ip address |%(address)s| no longer belongs to instance "
+"%(instance_uuid)s. Will notsetup it."
+msgstr ""
+
+#: nova/network/manager.py:733
+#, python-format
+msgid ""
+"Database inconsistency: DNS domain |%s| is registered in the Nova db but not"
+" visible to either the floating or instance DNS driver. It will be ignored."
+msgstr ""
+
+#: nova/network/manager.py:779
+#, python-format
+msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
+msgstr ""
+
+#: nova/network/manager.py:789
+#, python-format
+msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
+msgstr ""
+
+#: nova/network/manager.py:903
+#, python-format
+msgid "Disassociated %s stale fixed ip(s)"
+msgstr ""
+
+#: nova/network/manager.py:907
+msgid "setting network host"
+msgstr ""
+
+#: nova/network/manager.py:1021
+msgid "network allocations"
+msgstr ""
+
+#: nova/network/manager.py:1026
+#, python-format
+msgid "networks retrieved for instance: |%(networks)s|"
+msgstr ""
+
+#: nova/network/manager.py:1056
+msgid "network deallocation for instance"
+msgstr ""
+
+#: nova/network/manager.py:1279
+#, python-format
+msgid ""
+"instance-dns-zone is |%(domain)s|, which is in availability zone |%(zone)s|."
+" Instance is in zone |%(zone2)s|. No DNS record will be created."
+msgstr ""
+
+#: nova/network/manager.py:1358
+#, python-format
+msgid "Unable to release %s because vif doesn't exist."
+msgstr ""
+
+#: nova/network/manager.py:1379
+#, python-format
+msgid "Leased IP |%(address)s|"
+msgstr ""
+
+#: nova/network/manager.py:1383
+#, python-format
+msgid "IP %s leased that is not associated"
+msgstr ""
+
+#: nova/network/manager.py:1391
+#, python-format
+msgid "IP |%s| leased that isn't allocated"
+msgstr ""
+
+#: nova/network/manager.py:1396
+#, python-format
+msgid "Released IP |%(address)s|"
+msgstr ""
+
+#: nova/network/manager.py:1400
+#, python-format
+msgid "IP %s released that is not associated"
+msgstr ""
+
+#: nova/network/manager.py:1403
+#, python-format
+msgid "IP %s released that was not leased"
+msgstr ""
+
+#: nova/network/manager.py:1422
+#, python-format
+msgid "%s must be an integer"
+msgstr ""
+
+#: nova/network/manager.py:1446
+msgid "Maximum allowed length for 'label' is 255."
+msgstr ""
+
+#: nova/network/manager.py:1466
+#, python-format
+msgid ""
+"Subnet(s) too large, defaulting to /%s. To override, specify network_size "
+"flag."
+msgstr ""
+
+#: nova/network/manager.py:1547
+msgid "cidr already in use"
+msgstr ""
+
+#: nova/network/manager.py:1550
+#, python-format
+msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
+msgstr ""
+
+#: nova/network/manager.py:1561
+#, python-format
+msgid ""
+"requested cidr (%(cidr)s) conflicts with existing smaller cidr (%(smaller)s)"
+msgstr ""
+
+#: nova/network/manager.py:1620
+msgid "Network already exists!"
+msgstr ""
+
+#: nova/network/manager.py:1640
+#, python-format
+msgid "Network must be disassociated from project %s before delete"
+msgstr ""
+
+#: nova/network/manager.py:2097
+msgid ""
+"The sum between the number of networks and the vlan start cannot be greater "
+"than 4094"
+msgstr ""
+
+#: nova/network/manager.py:2104
+#, python-format
+msgid ""
+"The network range is not big enough to fit %(num_networks)s. Network size is"
+" %(network_size)s"
+msgstr ""
+
+#: nova/network/minidns.py:65
+msgid "This driver only supports type 'a'"
+msgstr ""
+
+#: nova/network/model.py:337
+msgid "v4 subnets are required for legacy nw_info"
+msgstr "miras nw_info için v4 alt ağları gerekiyor"
+
+#: nova/network/quantum/nova_ipam_lib.py:75
+msgid "Error creating network entry"
+msgstr ""
+
+#: nova/network/quantum/nova_ipam_lib.py:90
+#, python-format
+msgid "No network with net_id = %s"
+msgstr ""
+
+#: nova/network/quantum/nova_ipam_lib.py:256
+#, python-format
+msgid "No fixed IPs to deallocate for vif %s"
+msgstr ""
+
+#: nova/network/quantumv2/__init__.py:41
+msgid "_get_auth_token() failed"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:97
+#, python-format
+msgid "allocate_for_instance() for %s"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:100
+#, python-format
+msgid "empty project id for instance %s"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:153
+#, python-format
+msgid "Fail to delete port %(portid)s with failure: %(exception)s"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:164
+#, python-format
+msgid "deallocate_for_instance() for %s"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:173
+#, python-format
+msgid "Failed to delete quantum port %(portid)s "
+msgstr ""
+
+#: nova/network/quantumv2/api.py:182
+#, python-format
+msgid "get_instance_nw_info() for %s"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:197
+#, python-format
+msgid "validate_networks() for %s"
+msgstr ""
+
+#: nova/network/quantumv2/api.py:438
+#, python-format
+msgid "Multiple floating IP pools matches found for name '%s'"
+msgstr ""
+
+#: nova/openstack/common/lockutils.py:98
+#, python-format
+msgid "Could not release the acquired lock `%s`"
+msgstr ""
+
+#: nova/openstack/common/lockutils.py:184
+#, python-format
+msgid "Got semaphore \"%(lock)s\" for method \"%(method)s\"..."
+msgstr ""
+
+#: nova/openstack/common/lockutils.py:188
+#, python-format
+msgid "Attempting to grab file lock \"%(lock)s\" for method \"%(method)s\"..."
+msgstr ""
+
+#: nova/openstack/common/lockutils.py:216
+#, python-format
+msgid "Got file lock \"%(lock)s\" at %(path)s for method \"%(method)s\"..."
+msgstr ""
+
+#: nova/openstack/common/log.py:298
+#, python-format
+msgid "syslog facility must be one of: %s"
+msgstr "syslog servisi bunlardan biri olmak zorundadır: %s"
+
+#: nova/openstack/common/policy.py:394
+#, python-format
+msgid "Failed to understand rule %(rule)s"
+msgstr ""
+
+#: nova/openstack/common/policy.py:404
+#, python-format
+msgid "No handler for matches of kind %s"
+msgstr ""
+
+#: nova/openstack/common/policy.py:679
+#, python-format
+msgid "Failed to understand rule %(rule)r"
+msgstr ""
+
+#: nova/openstack/common/notifier/api.py:125
+#, python-format
+msgid "%s not in valid priorities"
+msgstr ""
+
+#: nova/openstack/common/notifier/api.py:141
+#, python-format
+msgid ""
+"Problem '%(e)s' attempting to send to notification system. "
+"Payload=%(payload)s"
+msgstr ""
+
+#: nova/openstack/common/notifier/api.py:170
+#, python-format
+msgid "Failed to load notifier %s. These notifications will not be sent."
+msgstr ""
+
+#: nova/openstack/common/notifier/rabbit_notifier.py:45
+#, python-format
+msgid "Could not send notification to %(topic)s. Payload=%(message)s"
+msgstr ""
+
+#: nova/openstack/common/plugin/pluginmanager.py:64
+#, python-format
+msgid "Failed to load plugin %(plug)s: %(exc)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:209
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:253
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:259
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:260
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:352
+#, python-format
+msgid "Making asynchronous call on %s ..."
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:355
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:377
+#, python-format
+msgid "Making asynchronous cast on %s..."
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:385
+msgid "Making asynchronous fanout cast..."
+msgstr ""
+
+#: nova/openstack/common/rpc/amqp.py:411
+#, python-format
+msgid "Sending %(event_type)s on %(topic)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/common.py:34
+msgid "An unknown RPC related exception occurred."
+msgstr ""
+
+#: nova/openstack/common/rpc/common.py:64
+#, python-format
+msgid ""
+"Remote error: %(exc_type)s %(value)s\n"
+"%(traceback)s."
+msgstr ""
+
+#: nova/openstack/common/rpc/common.py:81
+msgid "Timeout while waiting on RPC response."
+msgstr ""
+
+#: nova/openstack/common/rpc/common.py:85
+msgid "Invalid reuse of an RPC connection."
+msgstr "Geçersi RPC bağlantısı kullanımı."
+
+#: nova/openstack/common/rpc/common.py:89
+#, python-format
+msgid "Specified RPC version, %(version)s, not supported by this endpoint."
+msgstr ""
+
+#: nova/openstack/common/rpc/common.py:206
+#, python-format
+msgid "Returning exception %s to caller"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:168
+#: nova/openstack/common/rpc/impl_qpid.py:144
+msgid "Failed to process message... skipping it."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:468
+#, python-format
+msgid "Reconnecting to AMQP server on %(hostname)s:%(port)d"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:490
+#, python-format
+msgid "Connected to AMQP server on %(hostname)s:%(port)d"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:527
+#, python-format
+msgid ""
+"Unable to connect to AMQP server on %(hostname)s:%(port)d after "
+"%(max_retries)d tries: %(err_str)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:543
+#, python-format
+msgid ""
+"AMQP server on %(hostname)s:%(port)d is unreachable: %(err_str)s. Trying "
+"again in %(sleep_time)d seconds."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:595
+#: nova/openstack/common/rpc/impl_qpid.py:403
+#, python-format
+msgid "Failed to declare consumer for topic '%(topic)s': %(err_str)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:613
+#: nova/openstack/common/rpc/impl_qpid.py:418
+#, python-format
+msgid "Timed out waiting for RPC response: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:617
+#: nova/openstack/common/rpc/impl_qpid.py:422
+#, python-format
+msgid "Failed to consume message from queue: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_kombu.py:651
+#: nova/openstack/common/rpc/impl_qpid.py:452
+#, python-format
+msgid "Failed to publish message to topic '%(topic)s': %(err_str)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_qpid.py:359
+#, python-format
+msgid "Unable to connect to AMQP server: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_qpid.py:364
+#, python-format
+msgid "Connected to AMQP server on %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_qpid.py:372
+msgid "Re-established AMQP queues"
+msgstr "AMQP kuyrukları tekrar kuruluyor"
+
+#: nova/openstack/common/rpc/impl_qpid.py:430
+msgid "Error processing message. Skipping it."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:89
+msgid "JSON serialization failed."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:97
+#, python-format
+msgid "Deserializing: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:133
+#, python-format
+msgid "Connecting to %(addr)s with %(type)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:134
+#, python-format
+msgid "-> Subscribed to %(subscribe)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:135
+#, python-format
+msgid "-> bind: %(bind)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:143
+msgid "Could not open socket."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:155
+#, python-format
+msgid "Subscribing to %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:193
+msgid "You cannot recv on this socket."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:198
+msgid "You cannot send on this socket."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:251
+#, python-format
+msgid "Running func with context: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:276
+msgid "Sending reply"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:344
+msgid "Registering reactor"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:356
+msgid "In reactor registered"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:371
+msgid "Out reactor registered"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:375
+msgid "Consuming socket"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:422
+#, python-format
+msgid "CONSUMER GOT %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:432
+#, python-format
+msgid "->response->%s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:442
+#, python-format
+msgid "Created topic proxy: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:449
+#, python-format
+msgid "ROUTER RELAY-OUT START %(data)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:451
+#, python-format
+msgid "ROUTER RELAY-OUT SUCCEEDED %(data)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:467
+#, python-format
+msgid "CONSUMER RECEIVED DATA: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:469
+#, python-format
+msgid "ROUTER RELAY-OUT %(data)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:495
+#, python-format
+msgid "Create Consumer for topic (%(topic)s)"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:511
+#, python-format
+msgid "Consumer is a zmq.%s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:554
+msgid "Creating payload"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:567
+msgid "Creating queue socket for reply waiter"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:578
+msgid "Sending cast"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:581
+msgid "Cast sent; Waiting reply"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:584
+#, python-format
+msgid "Received message: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:585
+msgid "Unpacking response"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:612
+#, python-format
+msgid "%(msg)s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:615
+#, python-format
+msgid "Sending message(s) to: %s"
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:619
+msgid "No matchmaker results. Not casting."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:709
+msgid ""
+"Matchmaker could not be loaded.\n"
+"rpc_zmq_matchmaker is not a class."
+msgstr ""
+
+#: nova/openstack/common/rpc/impl_zmq.py:711
+msgid "Error loading Matchmaker."
+msgstr ""
+
+#: nova/openstack/common/rpc/matchmaker.py:45
+msgid "Match not found by MatchMaker."
+msgstr ""
+
+#: nova/openstack/common/rpc/matchmaker.py:177
+#: nova/openstack/common/rpc/matchmaker.py:195
+#, python-format
+msgid "No key defining hosts for topic '%s', see ringfile"
+msgstr ""
+
+#: nova/scheduler/chance.py:49 nova/scheduler/simple.py:96
+msgid "Is the appropriate service running?"
+msgstr "Uygun servis çalışıyor mu?"
+
+#: nova/scheduler/chance.py:54
+msgid "Could not find another compute"
+msgstr "Başka bir işlem bulunamadı"
+
+#: nova/scheduler/driver.py:64
+msgid "Exception during scheduler.run_instance"
+msgstr ""
+
+#: nova/scheduler/driver.py:68 nova/scheduler/manager.py:181
+#, python-format
+msgid "Setting instance to %(state)s state."
+msgstr ""
+
+#: nova/scheduler/driver.py:101
+#, python-format
+msgid "Casted '%(method)s' to volume '%(host)s'"
+msgstr ""
+
+#: nova/scheduler/driver.py:124
+#, python-format
+msgid "Casted '%(method)s' to compute '%(host)s'"
+msgstr ""
+
+#: nova/scheduler/driver.py:141
+#, python-format
+msgid "Casted '%(method)s' to %(topic)s '%(host)s'"
+msgstr ""
+
+#: nova/scheduler/driver.py:189
+msgid "Driver must implement schedule_prep_resize"
+msgstr ""
+
+#: nova/scheduler/driver.py:197
+msgid "Driver must implement schedule_run_instance"
+msgstr ""
+
+#: nova/scheduler/driver.py:202
+msgid "Driver must implement schedule_create_volune"
+msgstr ""
+
+#: nova/scheduler/driver.py:334
+#, python-format
+msgid ""
+"Unable to migrate %(instance_uuid)s to %(dest)s: Lack of "
+"memory(host:%(avail)s <= instance:%(mem_inst)s)"
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:49
+#, python-format
+msgid "No host selection for %s defined."
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:65
+#, python-format
+msgid "Attempting to build %(num_instances)d instance(s)"
+msgstr "%(num_instances)d örnek(leri) kurulmaya çalışılıyor."
+
+#: nova/scheduler/filter_scheduler.py:186
+msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:213
+#, python-format
+msgid ""
+"Exceeded max scheduling attempts %(max_attempts)d for instance "
+"%(instance_uuid)s"
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:224
+msgid "Scheduler only understands Compute nodes (for now)"
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:276
+#, python-format
+msgid "Filtered %(hosts)s"
+msgstr "%(hosts)s filtrelendi"
+
+#: nova/scheduler/filter_scheduler.py:286
+#, python-format
+msgid "Weighted %(weighted_host)s"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:237
+#, python-format
+msgid "Host filter fails for ignored host %(host)s"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:244
+#, python-format
+msgid "Host filter fails for non-forced host %(host)s"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:250
+#, python-format
+msgid "Host filter function %(func)s failed for %(host)s"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:256
+#, python-format
+msgid "Host filter passes for %(host)s"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:318
+#, python-format
+msgid "Received %(service_name)s service update from %(host)s."
+msgstr ""
+
+#: nova/scheduler/host_manager.py:341
+msgid "host_manager only implemented for 'compute'"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:351
+#, python-format
+msgid "No service for compute ID %s"
+msgstr "%s işlem ID'si için servis yok"
+
+#: nova/scheduler/manager.py:79
+#, python-format
+msgid "Failed to schedule create_volume: %(ex)s"
+msgstr ""
+
+#: nova/scheduler/manager.py:165
+#, python-format
+msgid "Failed to schedule_%(method)s: %(ex)s"
+msgstr "%(method)s zamanlaması başarısız: %(ex)s"
+
+#: nova/scheduler/scheduler_options.py:69
+#, python-format
+msgid "Could not stat scheduler options file %(filename)s: '%(e)s'"
+msgstr "%(filename)s dosyası için zamanlayıcı seçenekleri başlatılamadı: '%(e)s'"
+
+#: nova/scheduler/scheduler_options.py:78
+#, python-format
+msgid "Could not decode scheduler options: '%(e)s'"
+msgstr "Zamanlayıcı seçenekleri çözülemedi: '%(e)s'"
+
+#: nova/scheduler/simple.py:52
+msgid ""
+"SimpleScheduler now only covers volume scheduling and is deprecated in "
+"Folsom. Non-volume functionality in SimpleScheduler has been replaced by "
+"FilterScheduler"
+msgstr ""
+
+#: nova/scheduler/simple.py:62
+msgid ""
+"nova-volume functionality is deprecated in Folsom and will be removed in "
+"Grizzly. Volumes are now handled by Cinder"
+msgstr ""
+
+#: nova/scheduler/simple.py:89
+msgid "Not enough allocatable volume gigabytes remaining"
+msgstr ""
+
+#: nova/scheduler/filters/aggregate_instance_extra_specs.py:49
+#: nova/scheduler/filters/aggregate_instance_extra_specs.py:56
+#: nova/scheduler/filters/compute_capabilities_filter.py:48
+#, python-format
+msgid "%(host_state)s fails instance_type extra_specs requirements"
+msgstr ""
+
+#: nova/scheduler/filters/compute_filter.py:38
+#, python-format
+msgid "%(host_state)s is disabled or has not been heard from in a while"
+msgstr ""
+
+#: nova/scheduler/filters/compute_filter.py:42
+#, python-format
+msgid "%(host_state)s is disabled via capabilities"
+msgstr ""
+
+#: nova/scheduler/filters/core_filter.py:45
+msgid "VCPUs not set; assuming CPU collection broken"
+msgstr "VCPU ayarlanamadı, CPU koleksiyonunun bozulduğu varsayılıyor"
+
+#: nova/scheduler/filters/disk_filter.py:47
+#, python-format
+msgid ""
+"%(host_state)s does not have %(requested_disk)s MB usable disk, it only has "
+"%(usable_disk_mb)s MB usable disk."
+msgstr ""
+
+#: nova/scheduler/filters/image_props_filter.py:48
+#, python-format
+msgid ""
+"Instance contains properties %(image_props)s, but no corresponding "
+"capabilities are advertised by the compute node"
+msgstr ""
+
+#: nova/scheduler/filters/image_props_filter.py:61
+#, python-format
+msgid ""
+"Instance properties %(image_props)s are satisfied by compute host "
+"capabilities %(capabilities)s"
+msgstr ""
+
+#: nova/scheduler/filters/image_props_filter.py:66
+#, python-format
+msgid ""
+"Instance contains properties %(image_props)s that are not provided by the "
+"compute node capabilities %(capabilities)s"
+msgstr ""
+
+#: nova/scheduler/filters/image_props_filter.py:82
+#, python-format
+msgid "%(host_state)s does not support requested instance_properties"
+msgstr ""
+
+#: nova/scheduler/filters/io_ops_filter.py:42
+#, python-format
+msgid ""
+"%(host_state)s fails I/O ops check: Max IOs per host is set to "
+"%(max_io_ops)s"
+msgstr ""
+
+#: nova/scheduler/filters/num_instances_filter.py:39
+#, python-format
+msgid ""
+"%(host_state)s fails num_instances check: Max instances per host is set to "
+"%(max_instances)s"
+msgstr ""
+
+#: nova/scheduler/filters/ram_filter.py:46
+#, python-format
+msgid ""
+"%(host_state)s does not have %(requested_ram)s MB usable ram, it only has "
+"%(usable_ram)s MB usable ram."
+msgstr ""
+
+#: nova/scheduler/filters/retry_filter.py:38
+#, python-format
+msgid "Previously tried hosts: %(hosts)s. (host=%(host)s)"
+msgstr ""
+
+#: nova/scheduler/filters/trusted_filter.py:201
+#, python-format
+msgid "TCP: trust state of %(host)s:%(level)s(%(trust)s)"
+msgstr ""
+
+#: nova/tests/fake_ldap.py:34
+msgid "Attempted to instantiate singleton"
+msgstr ""
+
+#: nova/tests/fake_utils.py:72
+#, python-format
+msgid "Faking execution of cmd (subprocess): %s"
+msgstr "Komutun(alt sürecin) taklit çalıştırılması: %s"
+
+#: nova/tests/fake_utils.py:80
+#, python-format
+msgid "Faked command matched %s"
+msgstr "Sahte komut eşleşti %s"
+
+#: nova/tests/fake_utils.py:96
+#, python-format
+msgid "Faked command raised an exception %s"
+msgstr "Sahte komut bir istisna fırlattı %s"
+
+#: nova/tests/fake_utils.py:101
+#, python-format
+msgid "Reply to faked command is stdout='%(stdout)s' stderr='%(stderr)s'"
+msgstr "Sahte komuta verilen yanıt: standartçıktı='%(stdout)s' standarthata='%(stderr)s'"
+
+#: nova/tests/fake_volume.py:177 nova/volume/api.py:90 nova/volume/api.py:318
+#: nova/volume/cinder.py:159
+msgid "status must be available"
+msgstr ""
+
+#: nova/tests/fake_volume.py:180 nova/volume/api.py:321
+#: nova/volume/cinder.py:162
+msgid "already attached"
+msgstr ""
+
+#: nova/tests/fake_volume.py:185 nova/volume/api.py:328
+#: nova/volume/cinder.py:168
+msgid "already detached"
+msgstr ""
+
+#: nova/tests/fakelibvirt.py:849
+msgid "Please extend mock libvirt module to support flags"
+msgstr "Lütfen sahte libvirt modülü desteklenen bayraklar için genişletiniz"
+
+#: nova/tests/fakelibvirt.py:853
+msgid "Expected a list for 'auth' parameter"
+msgstr ""
+
+#: nova/tests/fakelibvirt.py:857
+msgid "Expected a function in 'auth[0]' parameter"
+msgstr ""
+
+#: nova/tests/fakelibvirt.py:861
+msgid "Expected a function in 'auth[1]' parameter"
+msgstr ""
+
+#: nova/tests/test_misc.py:62
+#, python-format
+msgid ""
+"The following migrations are missing a downgrade:\n"
+"\t%s"
+msgstr ""
+
+#: nova/tests/test_nova_manage.py:202
+msgid "id"
+msgstr "id"
+
+#: nova/tests/test_nova_manage.py:203
+msgid "IPv4"
+msgstr "IPv4"
+
+#: nova/tests/test_nova_manage.py:204
+msgid "IPv6"
+msgstr "IPv6"
+
+#: nova/tests/test_nova_manage.py:205
+msgid "start address"
+msgstr "Başlangıç adresi"
+
+#: nova/tests/test_nova_manage.py:206
+msgid "DNS1"
+msgstr "DNS1"
+
+#: nova/tests/test_nova_manage.py:207
+msgid "DNS2"
+msgstr "DNS2"
+
+#: nova/tests/test_nova_manage.py:208
+msgid "VlanID"
+msgstr "VlanID"
+
+#: nova/tests/test_nova_manage.py:209
+msgid "project"
+msgstr "proje"
+
+#: nova/tests/test_nova_manage.py:210
+msgid "uuid"
+msgstr "uuid"
+
+#: nova/tests/test_storwize_svc.py:177
+#, python-format
+msgid "unrecognized argument %s"
+msgstr ""
+
+#: nova/tests/test_storwize_svc.py:897
+#, python-format
+msgid "Run CLI command: %s"
+msgstr ""
+
+#: nova/tests/test_storwize_svc.py:900
+#, python-format
+msgid ""
+"CLI output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/tests/test_storwize_svc.py:905
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/tests/test_volume_types.py:58
+#, python-format
+msgid "Given data: %s"
+msgstr ""
+
+#: nova/tests/test_volume_types.py:59
+#, python-format
+msgid "Result data: %s"
+msgstr ""
+
+#: nova/tests/test_xenapi.py:691
+#, python-format
+msgid "Creating files in %s to simulate guest agent"
+msgstr "Konuk aracı benzetmek için %s'de dosyalar oluşturuluyor"
+
+#: nova/tests/test_xenapi.py:702
+#, python-format
+msgid "Removing simulated guest agent files in %s"
+msgstr "%s'deki benzetilmiş konuk aracı dosyaları kaldırılıyor"
+
+#: nova/tests/api/test_auth.py:79
+msgid "200 Role Match"
+msgstr ""
+
+#: nova/tests/api/test_auth.py:81
+msgid "200 No Roles"
+msgstr ""
+
+#: nova/tests/api/test_auth.py:84
+msgid "unexpected role header"
+msgstr ""
+
+#: nova/tests/api/openstack/compute/test_servers.py:2996
+msgid ""
+"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
+"instances"
+msgstr ""
+
+#: nova/tests/api/openstack/compute/test_servers.py:3001
+msgid ""
+"Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
+msgstr ""
+
+#: nova/tests/api/openstack/compute/test_servers.py:3006
+msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
+msgstr ""
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:56
+#, python-format
+msgid "_create: %s"
+msgstr "oluştur: %s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:65
+#, python-format
+msgid "_delete: %s"
+msgstr "sil: %s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:74
+#, python-format
+msgid "_get: %s"
+msgstr "getir: %s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:84
+#, python-format
+msgid "_get_all: %s"
+msgstr "hepsini getir: %s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:126
+#, python-format
+msgid "test_snapshot_create: param=%s"
+msgstr "Test sistem görüntüsü oluşturulması: parametre=%s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:135
+#, python-format
+msgid "test_snapshot_create: resp_dict=%s"
+msgstr "Test sistem görüntüsü oluşturulması: yanıt dizini=%s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:157
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:183
+#, python-format
+msgid "test_snapshot_create_force: param=%s"
+msgstr "Zorla test sistem görüntüsü oluşturulması: parametre=%s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:166
+#, python-format
+msgid "test_snapshot_create_force: resp_dict=%s"
+msgstr "Zorla test sistem görüntüsü oluşturulması: yanıt dizini=%s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:219
+#, python-format
+msgid "test_snapshot_show: resp=%s"
+msgstr "Test sistem görüntüsü gösterimi: yanıt=%s"
+
+#: nova/tests/api/openstack/compute/contrib/test_snapshots.py:245
+#, python-format
+msgid "test_snapshot_detail: resp_dict=%s"
+msgstr "Test sistem görüntüsü ayrıntısı: yanıt dizin=%s"
+
+#: nova/tests/compute/test_compute.py:630
+#: nova/tests/compute/test_compute.py:648
+#: nova/tests/compute/test_compute.py:684
+#: nova/tests/compute/test_compute.py:709
+#: nova/tests/compute/test_compute.py:2372
+#, python-format
+msgid "Running instances: %s"
+msgstr "Örnekler çalışıyor: %s"
+
+#: nova/tests/compute/test_compute.py:636
+#: nova/tests/compute/test_compute.py:671
+#: nova/tests/compute/test_compute.py:697
+#: nova/tests/compute/test_compute.py:727
+#, python-format
+msgid "After terminating instances: %s"
+msgstr "Örnekleri sonlandırmanın ardından: %s"
+
+#: nova/tests/compute/test_compute.py:1104
+msgid "Internal error"
+msgstr "İçsel hata"
+
+#: nova/tests/compute/test_compute.py:2383
+#, python-format
+msgid "After force-killing instances: %s"
+msgstr "Zorla öldürülen örneklerin ardından: %s"
+
+#: nova/tests/hyperv/hypervutils.py:141 nova/virt/hyperv/vmops.py:473
+#, python-format
+msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
+msgstr ""
+
+#: nova/tests/hyperv/hypervutils.py:192 nova/virt/hyperv/vmops.py:408
+#, python-format
+msgid "Failed to destroy vm %s"
+msgstr ""
+
+#: nova/tests/hyperv/hypervutils.py:235 nova/virt/hyperv/snapshotops.py:92
+#, python-format
+msgid "Failed to get info for disk %s"
+msgstr ""
+
+#: nova/tests/integrated/test_api_samples.py:138
+#, python-format
+msgid "Result: %(result)s is not a dict."
+msgstr ""
+
+#: nova/tests/integrated/test_api_samples.py:142
+#, python-format
+msgid ""
+"Key mismatch:\n"
+"%(ex_keys)s\n"
+"%(res_keys)s"
+msgstr ""
+
+#: nova/tests/integrated/test_api_samples.py:150
+#, python-format
+msgid "Result: %(result)s is not a list."
+msgstr ""
+
+#: nova/tests/integrated/test_api_samples.py:153
+#, python-format
+msgid ""
+"Length mismatch: %(result)s\n"
+"%(expected)s."
+msgstr ""
+
+#: nova/tests/integrated/test_api_samples.py:164
+#, python-format
+msgid "Result: %(res_obj)s not in %(expected)s."
+msgstr ""
+
+#: nova/tests/integrated/test_api_samples.py:181
+#: nova/tests/integrated/test_api_samples.py:194
+#, python-format
+msgid ""
+"Values do not match:\n"
+"%(expected)s\n"
+"%(result)s"
+msgstr ""
+
+#: nova/tests/integrated/test_login.py:31
+#, python-format
+msgid "flavor: %s"
+msgstr "Örnek türü: %s"
+
+#: nova/tests/integrated/api/client.py:38
+#, python-format
+msgid ""
+"%(message)s\n"
+"Status Code: %(_status)s\n"
+"Body: %(_body)s"
+msgstr "%(message)s\nDurum Kodu: %(_status)s\nGövde: %(_body)s"
+
+#: nova/tests/integrated/api/client.py:47
+msgid "Authentication error"
+msgstr "Kimlik doğrulama hatası"
+
+#: nova/tests/integrated/api/client.py:55
+msgid "Authorization error"
+msgstr "Yetkilendirme hatası"
+
+#: nova/tests/integrated/api/client.py:63
+msgid "Item not found"
+msgstr "Öğe bulunamadı"
+
+#: nova/tests/integrated/api/client.py:105
+#, python-format
+msgid "Doing %(method)s on %(relative_url)s"
+msgstr "%(relative_url)s'de %(method)s işlemleri yapılıyor"
+
+#: nova/tests/integrated/api/client.py:107
+#, python-format
+msgid "Body: %s"
+msgstr "Gövde: %s"
+
+#: nova/tests/integrated/api/client.py:125
+#, python-format
+msgid "%(auth_uri)s => code %(http_status)s"
+msgstr "%(auth_uri)s => kodu %(http_status)s"
+
+#: nova/tests/integrated/api/client.py:155
+#, python-format
+msgid "%(relative_uri)s => code %(http_status)s"
+msgstr ""
+
+#: nova/tests/integrated/api/client.py:165
+msgid "Unexpected status code"
+msgstr ""
+
+#: nova/tests/integrated/api/client.py:172
+#, python-format
+msgid "Decoding JSON: %s"
+msgstr ""
+
+#: nova/virt/configdrive.py:77
+#, python-format
+msgid "Added %(filepath)s to config drive"
+msgstr ""
+
+#: nova/virt/firewall.py:176 nova/virt/libvirt/firewall.py:249
+msgid "Attempted to unfilter instance which is not filtered"
+msgstr ""
+
+#: nova/virt/firewall.py:187
+msgid "Filters added to instance"
+msgstr ""
+
+#: nova/virt/firewall.py:189
+msgid "Provider Firewall Rules refreshed"
+msgstr ""
+
+#: nova/virt/firewall.py:357
+#, python-format
+msgid "Adding security group rule: %r"
+msgstr ""
+
+#: nova/virt/firewall.py:489 nova/virt/xenapi/firewall.py:80
+#, python-format
+msgid "Adding provider rule: %s"
+msgstr ""
+
+#: nova/virt/images.py:101
+msgid "'qemu-img info' parsing failed."
+msgstr ""
+
+#: nova/virt/images.py:107
+#, python-format
+msgid "fmt=%(fmt)s backed by: %(backing_file)s"
+msgstr ""
+
+#: nova/virt/images.py:118
+#, python-format
+msgid "Converted to raw, but format is now %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:93
+msgid "No domains exist."
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:95
+#, python-format
+msgid "============= initial domains =========== : %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:99
+msgid "Building domain: to be removed"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:103
+msgid "Not running domain: remove"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:111
+msgid "domain running on an unknown node: discarded"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:127
+#, python-format
+msgid "No such domain (%s)"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:134
+#, python-format
+msgid "Failed power down Bare-metal node %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:143
+msgid "deactivate -> activate fails"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:153
+msgid "destroy_domain: no such domain"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:154
+#, python-format
+msgid "No such domain %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:161
+#, python-format
+msgid "Domains: %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:164
+#, python-format
+msgid "After storing domains: %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:167
+msgid "deactivation/removing domain failed"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:174
+msgid "===== Domain is being created ====="
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:177
+msgid "Same domain name already exists"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:179
+msgid "create_domain: before get_idle_node"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:196
+#, python-format
+msgid "Created new domain: %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:211
+#, python-format
+msgid "Failed to boot Bare-metal node %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:220
+msgid "No such domain exists"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:224
+#, python-format
+msgid "change_domain_state: to new state %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:231
+#, python-format
+msgid "Stored fake domains to the file: %s"
+msgstr ""
+
+#: nova/virt/baremetal/dom.py:242
+msgid "domain does not exist"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:117
+#, python-format
+msgid "Error encountered when destroying instance '%(name)s': %(ex)s"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:131
+#, python-format
+msgid "instance %(instance_name)s: deleting instance files %(target)s"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:158
+#, python-format
+msgid "instance %s: rebooted"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:162
+msgid "_wait_for_reboot failed"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:191
+#, python-format
+msgid "instance %s: rescued"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:195
+msgid "_wait_for_rescue failed"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:212
+msgid "<============= spawn of baremetal =============>"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:225
+#, python-format
+msgid "instance %s: is building"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:231
+msgid "Key is injected but instance is not running yet"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:240
+#, python-format
+msgid "instance %s: booted"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:247
+#, python-format
+msgid "~~~~~~ current state = %s ~~~~~~"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:249
+#, python-format
+msgid "instance %s spawned successfully"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:252
+#, python-format
+msgid "instance %s:not booted"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:255
+msgid "Baremetal assignment is overcommitted."
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:339
+#, python-format
+msgid "instance %s: Creating image"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:457
+#, python-format
+msgid "instance %(inst_name)s: injecting %(injection)s into image %(img_id)s"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:467
+#, python-format
+msgid ""
+"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s "
+"(%(e)s)"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:513
+#, python-format
+msgid "instance %s: starting toXML method"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:516
+#, python-format
+msgid "instance %s: finished toXML method"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:560 nova/virt/hyperv/vmops.py:486
+#: nova/virt/libvirt/driver.py:1987
+msgid ""
+"Cannot get the number of cpu, because this function is not implemented for "
+"this platform. This error can be safely ignored for now."
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:683
+#, python-format
+msgid "#### RLK: cpu_arch = %s "
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:700
+msgid "Updating!"
+msgstr ""
+
+#: nova/virt/baremetal/driver.py:727 nova/virt/libvirt/driver.py:3033
+#: nova/virt/xenapi/host.py:148
+msgid "Updating host stats"
+msgstr ""
+
+#: nova/virt/baremetal/nodes.py:42
+#, python-format
+msgid "Unknown baremetal driver %(d)s"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:184
+msgid "free_node...."
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:215
+#, python-format
+msgid "deactivate_node is called for node_id = %(id)s node_ip = %(ip)s"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:220
+msgid "status of node is set to 0"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:231
+msgid "rootfs is already removed"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:263
+msgid "Before ping to the bare-metal node"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:274
+#, python-format
+msgid "TILERA_BOARD_#%(node_id)s %(node_ip)s is ready"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:278
+#, python-format
+msgid ""
+"TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready, out_msg=%(out_msg)s"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:290
+msgid "Noting to do for tilera nodes: vmlinux is in CF"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:313
+msgid "activate_node"
+msgstr ""
+
+#: nova/virt/baremetal/tilera.py:327
+msgid "Node is unknown error state."
+msgstr ""
+
+#: nova/virt/disk/api.py:198
+msgid "no capable image handler configured"
+msgstr ""
+
+#: nova/virt/disk/api.py:245
+#, python-format
+msgid "no disk image handler for: %s"
+msgstr ""
+
+#: nova/virt/disk/api.py:257
+msgid "image already mounted"
+msgstr ""
+
+#: nova/virt/disk/api.py:323
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
+msgstr ""
+
+#: nova/virt/disk/api.py:340
+#, python-format
+msgid "Failed to unmount container filesystem: %s"
+msgstr ""
+
+#: nova/virt/disk/api.py:373
+msgid "injected file path not valid"
+msgstr ""
+
+#: nova/virt/disk/api.py:518
+msgid "Not implemented on Windows"
+msgstr ""
+
+#: nova/virt/disk/api.py:552
+#, python-format
+msgid "User %(username)s not found in password file."
+msgstr ""
+
+#: nova/virt/disk/api.py:568
+#, python-format
+msgid "User %(username)s not found in shadow file."
+msgstr ""
+
+#: nova/virt/disk/guestfs.py:41
+#, python-format
+msgid "unsupported partition: %s"
+msgstr ""
+
+#: nova/virt/disk/guestfs.py:66 nova/virt/disk/guestfs.py:80
+#: nova/virt/disk/mount.py:132
+#, python-format
+msgid "Failed to mount filesystem: %s"
+msgstr ""
+
+#: nova/virt/disk/guestfs.py:79
+msgid "unknown guestmount error"
+msgstr ""
+
+#: nova/virt/disk/guestfs.py:119
+#, python-format
+msgid "Failed to umount image at %s, guestmount was still running after 10s"
+msgstr ""
+
+#: nova/virt/disk/loop.py:31
+#, python-format
+msgid "Could not attach image to loopback: %s"
+msgstr ""
+
+#: nova/virt/disk/mount.py:83
+#, python-format
+msgid "partition search unsupported with %s"
+msgstr ""
+
+#: nova/virt/disk/mount.py:99
+#, python-format
+msgid "partition %s not found"
+msgstr ""
+
+#: nova/virt/disk/mount.py:100
+#, python-format
+msgid "Failed to map partitions: %s"
+msgstr ""
+
+#: nova/virt/disk/nbd.py:59
+msgid "nbd unavailable: module not loaded"
+msgstr ""
+
+#: nova/virt/disk/nbd.py:64
+msgid "No free nbd devices"
+msgstr ""
+
+#: nova/virt/disk/nbd.py:86
+#, python-format
+msgid "qemu-nbd error: %s"
+msgstr ""
+
+#: nova/virt/disk/nbd.py:98
+#, python-format
+msgid "nbd device %s did not show up"
+msgstr ""
+
+#: nova/virt/hyperv/driver.py:189 nova/virt/hyperv/driver.py:192
+msgid "plug_vifs called"
+msgstr ""
+
+#: nova/virt/hyperv/driver.py:195
+msgid "ensure_filtering_rules_for_instance called"
+msgstr ""
+
+#: nova/virt/hyperv/driver.py:200
+msgid "unfilter_instance called"
+msgstr ""
+
+#: nova/virt/hyperv/driver.py:204
+msgid "confirm_migration called"
+msgstr ""
+
+#: nova/virt/hyperv/driver.py:209
+msgid "finish_revert_migration called"
+msgstr ""
+
+#: nova/virt/hyperv/driver.py:215
+msgid "finish_migration called"
+msgstr ""
+
+#: nova/virt/hyperv/driver.py:218
+msgid "get_console_output called"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:52
+msgid ""
+"Live migration is not supported \" \"by this version of "
+"Hyper-V"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:61
+msgid "Live migration is not enabled on this host"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:64
+msgid "Live migration networks are not configured on this host"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:68
+msgid "live_migration called"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:94
+#, python-format
+msgid "Getting live migration networks for remote host: %s"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:113
+#, python-format
+msgid "Starting live migration for instance: %s"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:126
+#, python-format
+msgid "Failed to live migrate VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:129
+#, python-format
+msgid "Calling live migration recover_method for instance: %s"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:133
+#, python-format
+msgid "Calling live migration post_method for instance: %s"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:139
+msgid "pre_live_migration called"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:157
+msgid "post_live_migration_at_destination called"
+msgstr ""
+
+#: nova/virt/hyperv/livemigrationops.py:161
+#, python-format
+msgid "compare_cpu called %s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:57
+#, python-format
+msgid "Creating snapshot for instance %s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:71
+#, python-format
+msgid "Failed to create snapshot for VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:83
+#, python-format
+msgid "Getting info for VHD %s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:106
+#, python-format
+msgid "Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:116
+#, python-format
+msgid "Copying base disk %(src_vhd_path)s to %(dest_base_disk_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:120
+#, python-format
+msgid ""
+"Reconnecting copied base VHD %(dest_base_disk_path)s and diff VHD "
+"%(dest_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:134
+#, python-format
+msgid ""
+"Failed to reconnect base disk %(dest_base_disk_path)s and diff disk "
+"%(dest_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:139
+#, python-format
+msgid ""
+"Merging base disk %(dest_base_disk_path)s and diff disk %(dest_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:151
+#, python-format
+msgid ""
+"Failed to merge base disk %(dest_base_disk_path)s and diff disk "
+"%(dest_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:164
+#, python-format
+msgid ""
+"Updating Glance image %(image_id)s with content from merged disk "
+"%(image_vhd_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:169
+#, python-format
+msgid "Snapshot image %(image_id)s updated for VM %(instance_name)s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:172
+#, python-format
+msgid "Removing snapshot %s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:181
+#, python-format
+msgid "Failed to remove snapshot for VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/snapshotops.py:186
+#, python-format
+msgid "Removing folder %s "
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:69
+msgid "get_info called for instance"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:93
+#, python-format
+msgid "hyperv vm state: %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:99
+#, python-format
+msgid ""
+"Got Info for vm %(instance_name)s: state=%(state)s, mem=%(memusage)s, "
+"num_cpu=%(numprocs)s, uptime=%(uptime)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:135
+#, python-format
+msgid "cache image failed: %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:154
+#, python-format
+msgid "Starting VM %s "
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:156
+#, python-format
+msgid "Started VM %s "
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:158
+#, python-format
+msgid "spawn vm failed: %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:177
+#, python-format
+msgid "Failed to create VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:180
+#, python-format
+msgid "Created VM %s..."
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:197
+#, python-format
+msgid "Set memory for vm %s..."
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:210
+#, python-format
+msgid "Set vcpus for vm %s..."
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:214
+#, python-format
+msgid "Creating a scsi controller for %(vm_name)s for volume attaching"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:223
+msgid "Controller not found"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:231
+#, python-format
+msgid "Failed to add scsi controller to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:236
+#, python-format
+msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:263
+#, python-format
+msgid "Failed to add diskdrive to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:266
+#, python-format
+msgid "New disk drive path is %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:285
+#, python-format
+msgid "Failed to add vhd file to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:287
+#, python-format
+msgid "Created disk for %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:291
+#, python-format
+msgid "Creating nic for %s "
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:296
+msgid "Cannot find vSwitch"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:316
+msgid "Failed creating a port on the external vswitch"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:317
+#, python-format
+msgid "Failed creating port for %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:320
+#, python-format
+msgid "Created switch port %(vm_name)s on switch %(ext_path)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:332
+#, python-format
+msgid "Failed to add nic to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:334
+#, python-format
+msgid "Created nic for %s "
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:341 nova/virt/hyperv/vmops.py:344
+#, python-format
+msgid "Attempting to bind NIC to %s "
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:349
+msgid "No vSwitch specified, attaching to default"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:374
+#, python-format
+msgid "Got request to destroy vm %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:418
+#, python-format
+msgid "Del: disk %(vhdfile)s vm %(instance_name)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:424
+msgid "Pause instance"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:429
+msgid "Unpause instance"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:435
+msgid "Suspend instance"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:440
+msgid "Resume instance"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:445
+msgid "Power off instance"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:450
+msgid "Power on instance"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:470
+#, python-format
+msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:555
+#, python-format
+msgid "Windows version: %s "
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:567
+msgid "get_available_resource called"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:604
+#, python-format
+msgid "use_cow_image:%s"
+msgstr ""
+
+#: nova/virt/hyperv/vmops.py:624
+#, python-format
+msgid "Failed to create Difference Disk from %(base)s to %(target)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmutils.py:54
+#, python-format
+msgid "duplicate name found: %s"
+msgstr ""
+
+#: nova/virt/hyperv/vmutils.py:68
+#, python-format
+msgid ""
+"WMI job failed: %(ErrorSummaryDescription)s - %(ErrorDescription)s - "
+"%(ErrorCode)s"
+msgstr ""
+
+#: nova/virt/hyperv/vmutils.py:73
+#, python-format
+msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s "
+msgstr ""
+
+#: nova/virt/hyperv/vmutils.py:80 nova/virt/hyperv/vmutils.py:96
+#, python-format
+msgid "Creating folder %s "
+msgstr ""
+
+#: nova/virt/hyperv/vmutils.py:94
+#, python-format
+msgid "Removing existing folder %s "
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:69 nova/virt/xenapi/vm_utils.py:483
+#, python-format
+msgid "block device info: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:97
+#, python-format
+msgid "Attach boot from volume failed: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:100
+#, python-format
+msgid "Unable to attach boot volume to instance %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:109 nova/virt/xenapi/volumeops.py:115
+#, python-format
+msgid "Attach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:134
+#, python-format
+msgid "Attach volume failed: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:137 nova/virt/xenapi/volumeops.py:191
+#, python-format
+msgid "Unable to attach volume to instance %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:156
+#, python-format
+msgid "Failed to add volume to VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:170
+#, python-format
+msgid "Detach_volume: %(connection_info)s, %(instance_name)s, %(mountpoint)s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:187
+#, python-format
+msgid "Mounted disk to detach is: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:188
+#, python-format
+msgid "host_resource disk detached is: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:191
+#, python-format
+msgid "Physical disk detached is: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:198
+#, python-format
+msgid "Failed to remove volume from VM %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:207 nova/virt/libvirt/driver.py:604
+msgid "Could not determine iscsi initiator name"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:224
+#, python-format
+msgid "device.InitiatorName: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:225
+#, python-format
+msgid "device.TargetName: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:226
+#, python-format
+msgid "device.ScsiPortNumber: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:227
+#, python-format
+msgid "device.ScsiPathId: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:228
+#, python-format
+msgid "device.ScsiTargetId): %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:229
+#, python-format
+msgid "device.ScsiLun: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:230
+#, python-format
+msgid "device.DeviceInterfaceGuid :%s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:232
+#, python-format
+msgid "device.DeviceInterfaceName: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:234
+#, python-format
+msgid "device.LegacyName: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:235
+#, python-format
+msgid "device.DeviceType: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:236
+#, python-format
+msgid "device.DeviceNumber %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:237
+#, python-format
+msgid "device.PartitionNumber :%s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:243 nova/virt/hyperv/volumeops.py:262
+#, python-format
+msgid "Unable to find a mounted disk for target_iqn: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:245
+#, python-format
+msgid "Device number : %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:246
+#, python-format
+msgid "Target lun : %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:252 nova/virt/hyperv/volumeops.py:259
+#, python-format
+msgid "Mounted disk is: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:276
+#, python-format
+msgid "Drive number to disconnect is: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:283
+#, python-format
+msgid "DeviceNumber : %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:288
+#, python-format
+msgid "Disk path to parse: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:290
+#, python-format
+msgid "start_device_id: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeops.py:292
+#, python-format
+msgid "end_device_id: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeutils.py:51
+#, python-format
+msgid "An error has occurred when calling the iscsi initiator: %s"
+msgstr ""
+
+#: nova/virt/hyperv/volumeutils.py:68
+msgid "The ISCSI initiator name can't be found. Choosing the default one"
+msgstr ""
+
+#: nova/virt/hyperv/volumeutils.py:121 nova/virt/libvirt/driver.py:1463
+#: nova/virt/xenapi/vm_utils.py:476
+#, python-format
+msgid "block_device_list %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:333
+#, python-format
+msgid ""
+"Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:339
+#, python-format
+msgid "Connecting to libvirt: %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:360
+msgid "Connection to libvirt broke"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:382 nova/virt/libvirt/driver.py:385
+#, python-format
+msgid "Can not handle authentication request for %d credentials"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:467
+#, python-format
+msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:481
+msgid "During wait destroy, instance disappeared."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:486
+msgid "Instance destroyed successfully."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:508
+msgid "Error from libvirt during undefineFlags. Retrying with undefine"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:523
+#, python-format
+msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:536
+#, python-format
+msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:553
+#, python-format
+msgid "Deleting instance files %(target)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:567
+#, python-format
+msgid "Failed to cleanup directory %(target)s: %(e)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:729
+msgid "During detach_volume, instance disappeared."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:739
+msgid "attaching LXC block device"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:752
+msgid "detaching LXC block device"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:884
+msgid "Instance soft rebooted successfully."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:888
+msgid "Failed to soft reboot instance."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:920
+msgid "Instance shutdown successfully."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:955
+msgid "Instance rebooted successfully."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1085
+msgid "Instance is running"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1092 nova/virt/powervm/operator.py:253
+msgid "Instance spawned successfully."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1108
+#, python-format
+msgid "data: %(data)r, fpath: %(fpath)r"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1154
+msgid "Guest does not have a console available"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1198
+#, python-format
+msgid "Path '%(path)s' supports direct I/O"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1202
+#, python-format
+msgid "Path '%(path)s' does not support direct I/O: '%(ex)s'"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1206 nova/virt/libvirt/driver.py:1210
+#, python-format
+msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1276
+msgid "Creating image"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1402
+msgid "Using config drive"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1412
+#, python-format
+msgid "Creating config drive at %(path)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1426
+#, python-format
+msgid "Injecting %(injection)s into image %(img_id)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1436
+#, python-format
+msgid "Ignoring error injecting data into image %(img_id)s (%(e)s)"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1510
+#, python-format
+msgid ""
+"Config requested an explicit CPU model, but the current libvirt hypervisor "
+"'%s' does not support selecting CPU models"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1516
+msgid "Config requested a custom CPU model, but no model name was provided"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1520
+msgid "A CPU model name should not be set when a host CPU model is requested"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1524
+#, python-format
+msgid "CPU mode '%(mode)s' model '%(model)s' was chosen"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1540
+msgid ""
+"Passthrough of the host CPU was requested but this libvirt version does not "
+"support this feature"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1832
+msgid "Starting toXML method"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1836
+msgid "Finished toXML method"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1853
+#, python-format
+msgid ""
+"Error from libvirt while looking up %(instance_name)s: [Error Code "
+"%(error_code)s] %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2105
+msgid "libvirt version is too old (does not support getVersion)"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2292
+msgid "Block migration can not be used with shared storage."
+msgstr "Blok göçü paylaşılan hafıza ile kullanılamaz."
+
+#: nova/virt/libvirt/driver.py:2300
+msgid "Live migration can not be used without shared storage."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2340
+#, python-format
+msgid ""
+"Unable to migrate %(instance_uuid)s: Disk of instance is too large(available"
+" on destination host:%(available)s < need:%(necessary)s)"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2360
+#, python-format
+msgid ""
+"Instance launched has CPU info:\n"
+"%s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2372
+#, python-format
+msgid ""
+"CPU doesn't have compatibility.\n"
+"\n"
+"%(ret)s\n"
+"\n"
+"Refer to %(u)s"
+msgstr "CPU uyumluluğu yok. \\n \\n %(ret)s \\n \\n Bkz: %(u)s"
+
+#: nova/virt/libvirt/driver.py:2389
+#, python-format
+msgid ""
+"Creating tmpfile %s to notify to other compute nodes that they should mount "
+"the same storage."
+msgstr "Diğer hesaplama düğümlerini haberdar etmek için %s geçici dosyası oluşturuluyor. Düğümler aynı belleğe bağlanmalıdır."
+
+#: nova/virt/libvirt/driver.py:2437
+#, python-format
+msgid "The firewall filter for %s does not exist"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2507
+#, python-format
+msgid "Live Migration failure: %(e)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2551
+#, python-format
+msgid "plug_vifs() failed %(cnt)d.Retry up to %(max_retry)d for %(hostname)s."
+msgstr "plug_vifs() %(cnt)d kez başarısız oldu. %(hostname)s için %(max_retry)d denemeye kadar tekrar deneniyor."
+
+#: nova/virt/libvirt/driver.py:2678
+#, python-format
+msgid "skipping %(path)s since it looks like volume"
+msgstr "Birim gibi göründüğünden beri %(path)s atlanıyor"
+
+#: nova/virt/libvirt/driver.py:2727
+#, python-format
+msgid "Getting disk size of %(i_name)s: %(e)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2789
+msgid "Starting migrate_disk_and_power_off"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2848
+msgid "Instance running successfully."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2855
+msgid "Starting finish_migration"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2906
+msgid "Starting finish_revert_migration"
+msgstr ""
+
+#: nova/virt/libvirt/firewall.py:34
+msgid ""
+"Libvirt module could not be loaded. NWFilterFirewall will not work "
+"correctly."
+msgstr "Libvirt modülü yüklenemedi. Ağ filtre güvenlik duvarı doğru çalışmıyor."
+
+#: nova/virt/libvirt/firewall.py:102
+msgid "Called setup_basic_filtering in nwfilter"
+msgstr "Ağ filtresinde temel filtreleme kurulumu çağırıldı"
+
+#: nova/virt/libvirt/firewall.py:110
+msgid "Ensuring static filters"
+msgstr "Durağan filtreler koruma altına alındı"
+
+#: nova/virt/libvirt/firewall.py:191
+#, python-format
+msgid "The nwfilter(%(instance_filter_name)s) is not found."
+msgstr "(%(instance_filter_name)s) ağ filtresi bulunamadı."
+
+#: nova/virt/libvirt/firewall.py:214
+#, python-format
+msgid "The nwfilter(%(instance_filter_name)s) for%(name)s is not found."
+msgstr "%(name)s için (%(instance_filter_name)s) ağ filtresi bulunamadı."
+
+#: nova/virt/libvirt/firewall.py:230
+msgid "iptables firewall: Setup Basic Filtering"
+msgstr "iptabloları güvenlik duvarı: Temel Filtreleme Kurulumu"
+
+#: nova/virt/libvirt/imagebackend.py:213
+msgid "You should specify libvirt_images_volume_group flag to use LVM images."
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:276
+#, python-format
+msgid "Unknown image_type=%s"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:164
+#, python-format
+msgid "%s is a valid instance name"
+msgstr "%s geçerli bir örnek ismidir"
+
+#: nova/virt/libvirt/imagecache.py:167
+#, python-format
+msgid "%s has a disk file"
+msgstr "%s bir disk dosyasına sahiptir"
+
+#: nova/virt/libvirt/imagecache.py:169
+#, python-format
+msgid "Instance %(instance)s is backed by %(backing)s"
+msgstr "%(instance)s örneği %(backing)s tarafından desteklenmiştir"
+
+#: nova/virt/libvirt/imagecache.py:182
+#, python-format
+msgid ""
+"Instance %(instance)s is using a backing file %(backing)s which does not "
+"appear in the image service"
+msgstr "%(instance)s örneği imaj servisinde görünmeyen bir %(backing)s destek dosyasını kullanıyor"
+
+#: nova/virt/libvirt/imagecache.py:234
+#, python-format
+msgid "%(id)s (%(base_file)s): image verification failed"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:244
+#, python-format
+msgid "%(id)s (%(base_file)s): image verification skipped, no hash stored"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:263
+#, python-format
+msgid "Cannot remove %(base_file)s, it does not exist"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:275
+#, python-format
+msgid "Base file too young to remove: %s"
+msgstr "Temel dosya kaldırmak için çok yeni: %s"
+
+#: nova/virt/libvirt/imagecache.py:278
+#, python-format
+msgid "Removing base file: %s"
+msgstr "Temel dosya kaldırılıyor: %s"
+
+#: nova/virt/libvirt/imagecache.py:285
+#, python-format
+msgid "Failed to remove %(base_file)s, error was %(error)s"
+msgstr "%(base_file)s dosyasını kaldırma başarısız, %(error)s"
+
+#: nova/virt/libvirt/imagecache.py:296
+#, python-format
+msgid "%(id)s (%(base_file)s): checking"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:315
+#, python-format
+msgid ""
+"%(id)s (%(base_file)s): in use: on this node %(local)d local, %(remote)d on "
+"other nodes"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:327
+#, python-format
+msgid ""
+"%(id)s (%(base_file)s): warning -- an absent base file is in use! instances:"
+" %(instance_list)s"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:335
+#, python-format
+msgid "%(id)s (%(base_file)s): in use on (%(remote)d on other nodes)"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:345
+#, python-format
+msgid "%(id)s (%(base_file)s): image is not in use"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:351
+#, python-format
+msgid "%(id)s (%(base_file)s): image is in use"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:374
+#, python-format
+msgid "Skipping verification, no base directory at %s"
+msgstr "Doğrulama atlanıyor, %s'de temel dizin yok"
+
+#: nova/virt/libvirt/imagecache.py:378
+msgid "Verify base images"
+msgstr "Temel imajları doğrulayın"
+
+#: nova/virt/libvirt/imagecache.py:385
+#, python-format
+msgid "Image id %(id)s yields fingerprint %(fingerprint)s"
+msgstr ""
+
+#: nova/virt/libvirt/imagecache.py:403
+#, python-format
+msgid "Unknown base file: %s"
+msgstr "Bilinmeyen temel dosya: %s"
+
+#: nova/virt/libvirt/imagecache.py:408
+#, python-format
+msgid "Active base files: %s"
+msgstr "Aktif temel dosyalar: %s"
+
+#: nova/virt/libvirt/imagecache.py:411
+#, python-format
+msgid "Corrupt base files: %s"
+msgstr "Bozuk temel dosyalar: %s"
+
+#: nova/virt/libvirt/imagecache.py:415
+#, python-format
+msgid "Removable base files: %s"
+msgstr "Taşınabilir temel dosyalar: %s"
+
+#: nova/virt/libvirt/imagecache.py:423
+msgid "Verification complete"
+msgstr "Doğrulama tamamlandı"
+
+#: nova/virt/libvirt/snapshots.py:83 nova/virt/libvirt/snapshots.py:86
+#: nova/virt/libvirt/snapshots.py:89
+msgid "LVM snapshots not implemented"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:131
+#, python-format
+msgid ""
+"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, "
+"but %(size)db required by volume %(lv)s."
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:140
+#, python-format
+msgid ""
+"Volume group %(vg)s will not be able to hold sparse volume %(lv)s. Virtual "
+"volume size is %(size)db, but free space on volume group is only "
+"%(free_space)db."
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:187
+#, python-format
+msgid "Path %s must be LVM logical volume"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:410
+msgid "Can't retrieve root device path from instance libvirt configuration"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:499
+#, python-format
+msgid "Reading image info file: %s"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:503
+#, python-format
+msgid "Read: %s"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:509
+#, python-format
+msgid "Error reading image info file %(filename)s: %(error)s"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:533
+#, python-format
+msgid "Writing image info file: %s"
+msgstr ""
+
+#: nova/virt/libvirt/utils.py:534
+#, python-format
+msgid "Wrote: %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:95
+#, python-format
+msgid "Ensuring vlan %(vlan)s and bridge %(bridge)s"
+msgstr "%(vlan)s vlan ve %(bridge)s köprüsü koruma altına alınıyor"
+
+#: nova/virt/libvirt/vif.py:105
+#, python-format
+msgid "Ensuring bridge %s"
+msgstr "%s köprüsü koruma altına alınıyor"
+
+#: nova/virt/libvirt/vif.py:181 nova/virt/libvirt/vif.py:247
+#: nova/virt/libvirt/vif.py:307
+msgid "Failed while unplugging vif"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:174
+#, python-format
+msgid "iSCSI device not found at %s"
+msgstr "%s'de iSCSI cihazı bulunamadı"
+
+#: nova/virt/libvirt/volume.py:177
+#, python-format
+msgid ""
+"ISCSI volume not yet found at: %(mount_device)s. Will rescan & retry. Try "
+"number: %(tries)s"
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:189
+#, python-format
+msgid "Found iSCSI node %(mount_device)s (after %(tries)s rescans)"
+msgstr ""
+
+#: nova/virt/libvirt/volume_nfs.py:81 nova/volume/nfs.py:276
+#, python-format
+msgid "%s is already mounted"
+msgstr ""
+
+#: nova/virt/powervm/common.py:54
+msgid "Connection error connecting PowerVM manager"
+msgstr ""
+
+#: nova/virt/powervm/common.py:111
+msgid "File transfer to PowerVM manager failed"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:21
+msgid "Connection to PowerVM manager failed"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:25
+msgid "File '%(file_path)' transfer to PowerVM manager failed"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:29
+#, python-format
+msgid "LPAR instance '%(instance_name)s' could not be found"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:33
+#, python-format
+msgid "LPAR instance '%(instance_name)s' creation failed"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:37
+msgid "No space left on any volume group"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:45
+#, python-format
+msgid "Operation '%(operation)s' on LPAR '%(instance_name)s' timed out"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:50
+msgid "Image creation failed on PowerVM"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:54
+#, python-format
+msgid ""
+"Insufficient free memory on PowerVM system to spawn instance "
+"'%(instance_name)s'"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:59
+#, python-format
+msgid ""
+"Insufficient available CPUs on PowerVM system to spawn instance "
+"'%(instance_name)s'"
+msgstr ""
+
+#: nova/virt/powervm/exception.py:64
+#, python-format
+msgid "PowerVM LPAR instance '%(instance_name)s' cleanup failed"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:91
+#, python-format
+msgid "LPAR instance '%s' not found"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:174
+msgid "Not enough free memory in the host"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:184
+msgid "Insufficient available CPU on PowerVM"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:208
+#, python-format
+msgid "Creating LPAR instance '%s'"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:211
+#, python-format
+msgid "LPAR instance '%s' creation failed"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:221
+#, python-format
+msgid "Fetching image '%s' from glance"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:225
+#, python-format
+msgid "Copying image '%s' to IVM"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:230
+msgid "Creating logical volume"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:235
+#, python-format
+msgid "Copying image to the device '%s'"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:238
+#, python-format
+msgid "PowerVM image creation failed: %s"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:244
+#, python-format
+msgid "Activating the LPAR instance '%s'"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:258
+#, python-format
+msgid "Instance '%s' failed to boot"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:275
+#, python-format
+msgid "During destroy, LPAR instance '%s' was not found on PowerVM system."
+msgstr ""
+
+#: nova/virt/powervm/operator.py:284
+#, python-format
+msgid "Shutting down the instance '%s'"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:288
+#, python-format
+msgid "Removing the logical volume '%s'"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:291
+#, python-format
+msgid "Deleting the LPAR instance '%s'"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:294
+msgid "PowerVM instance cleanup failed"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:495
+msgid "Could not create logical volume. No space left on any volume group."
+msgstr ""
+
+#: nova/virt/powervm/operator.py:554
+msgid "Unable to get checksum"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:557
+msgid "Image checksums do not match"
+msgstr ""
+
+#: nova/virt/powervm/operator.py:582
+msgid "Uncompressed image file not found"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:111
+msgid ""
+"Must specify vmwareapi_host_ip,vmwareapi_host_username and "
+"vmwareapi_host_password to usecompute_driver=vmwareapi.VMWareESXDriver"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:275
+#, python-format
+msgid "In vmwareapi:_create_session, got this exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:358
+#, python-format
+msgid "In vmwareapi:_call_method, got this exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:393
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: success"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:398
+#, python-format
+msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:402
+#, python-format
+msgid "In vmwareapi:_poll_task, Got this error %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/error_util.py:93
+#, python-format
+msgid "Error(s) %s occurred in the call to RetrieveProperties"
+msgstr ""
+
+#: nova/virt/vmwareapi/fake.py:44 nova/virt/xenapi/fake.py:77
+#, python-format
+msgid "%(text)s: _db_content => %(content)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/fake.py:131
+#, python-format
+msgid "Property %(attr)s not set for the managed object %(name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/fake.py:436
+msgid "There is no VM registered"
+msgstr ""
+
+#: nova/virt/vmwareapi/fake.py:438 nova/virt/vmwareapi/fake.py:608
+#, python-format
+msgid "Virtual Machine with ref %s is not there"
+msgstr ""
+
+#: nova/virt/vmwareapi/fake.py:501
+#, python-format
+msgid "Logging out a session that is invalid or already logged out: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/fake.py:516
+msgid "Session is faulty"
+msgstr ""
+
+#: nova/virt/vmwareapi/fake.py:519
+msgid "Session Invalid"
+msgstr ""
+
+#: nova/virt/vmwareapi/fake.py:605
+msgid " No Virtual Machine has been registered yet"
+msgstr ""
+
+#: nova/virt/vmwareapi/io_util.py:103
+#, python-format
+msgid "Glance image %s is in killed state"
+msgstr ""
+
+#: nova/virt/vmwareapi/io_util.py:111
+#, python-format
+msgid "Glance image %(image_id)s is in unknown state - %(state)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/network_utils.py:128
+msgid ""
+"ESX SOAP server returned an empty port group for the host system in its "
+"response"
+msgstr ""
+
+#: nova/virt/vmwareapi/network_utils.py:155
+#, python-format
+msgid "Creating Port Group with name %s on the ESX host"
+msgstr ""
+
+#: nova/virt/vmwareapi/network_utils.py:169
+#, python-format
+msgid "Created Port Group with name %s on the ESX host"
+msgstr ""
+
+#: nova/virt/vmwareapi/read_write_util.py:145
+#, python-format
+msgid ""
+"Exception during HTTP connection close in VMWareHTTpWrite. Exception is %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vim.py:84
+msgid "Unable to import suds."
+msgstr ""
+
+#: nova/virt/vmwareapi/vim.py:90
+msgid "Must specify vmwareapi_wsdl_loc"
+msgstr ""
+
+#: nova/virt/vmwareapi/vim.py:145
+#, python-format
+msgid "No such SOAP method '%s' provided by VI SDK"
+msgstr ""
+
+#: nova/virt/vmwareapi/vim.py:150
+#, python-format
+msgid "httplib error in %s: "
+msgstr ""
+
+#: nova/virt/vmwareapi/vim.py:157
+#, python-format
+msgid "Socket error in %s: "
+msgstr ""
+
+#: nova/virt/vmwareapi/vim.py:162
+#, python-format
+msgid "Type error in %s: "
+msgstr ""
+
+#: nova/virt/vmwareapi/vim.py:166
+#, python-format
+msgid "Exception in %s "
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:66
+msgid "Getting list of instances"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:82
+#, python-format
+msgid "Got total of %s instances"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:126
+msgid "Couldn't get a local Datastore reference"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:196
+msgid "Creating VM on the ESX host"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:204
+msgid "Created VM on the ESX host"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:232
+#, python-format
+msgid ""
+"Creating Virtual Disk of size %(vmdk_file_size_in_kb)s KB and adapter type"
+" %(adapter_type)s on the ESX host local store %(data_store_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:250
+#, python-format
+msgid ""
+"Created Virtual Disk of size %(vmdk_file_size_in_kb)s KB on the ESX host "
+"local store %(data_store_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:260
+#, python-format
+msgid ""
+"Deleting the file %(flat_uploaded_vmdk_path)s on the ESX host localstore "
+"%(data_store_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:273
+#, python-format
+msgid ""
+"Deleted the file %(flat_uploaded_vmdk_path)s on the ESX host local store "
+"%(data_store_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:285
+#, python-format
+msgid ""
+"Downloading image file data %(image_ref)s to the ESX data store "
+"%(data_store_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:301
+#, python-format
+msgid ""
+"Downloaded image file data %(image_ref)s to the ESX data store "
+"%(data_store_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:319
+msgid "Reconfiguring VM instance to attach the image disk"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:326
+msgid "Reconfigured VM instance to attach the image disk"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:333
+msgid "Powering on the VM instance"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:339
+msgid "Powered on the VM instance"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:385
+msgid "Creating Snapshot of the VM instance"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:395
+msgid "Created Snapshot of the VM instance"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:438
+msgid "Copying disk data before snapshot of the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:451
+msgid "Copied disk data before snapshot of the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:460
+#, python-format
+msgid "Uploading image %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:474
+#, python-format
+msgid "Uploaded image %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:485
+#, python-format
+msgid "Deleting temporary vmdk file %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:494
+#, python-format
+msgid "Deleted temporary vmdk file %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:526
+msgid "instance is not powered on"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:533
+msgid "Rebooting guest OS of VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:536
+msgid "Rebooted guest OS of VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:538
+msgid "Doing hard reboot of VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:542
+msgid "Did hard reboot of VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:554
+msgid "instance not present"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:573
+msgid "Powering off the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:578
+msgid "Powered off the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:582
+msgid "Unregistering the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:585
+msgid "Unregistered the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:587
+#, python-format
+msgid ""
+"In vmwareapi:vmops:destroy, got this exception while un-registering the VM: "
+"%s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:599
+#, python-format
+msgid "Deleting contents of the VM from datastore %(datastore_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:609
+#, python-format
+msgid "Deleted contents of the VM from datastore %(datastore_name)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:614
+#, python-format
+msgid ""
+"In vmwareapi:vmops:destroy, got this exception while deleting the VM "
+"contents from the disk: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:623
+msgid "pause not supported for vmwareapi"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:627
+msgid "unpause not supported for vmwareapi"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:641
+msgid "Suspending the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:645
+msgid "Suspended the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:648
+msgid "instance is powered off and can not be suspended."
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:651
+msgid "VM was already in suspended state. So returning without doing anything"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:664
+msgid "Resuming the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:669
+msgid "Resumed the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:671
+msgid "instance is not in a suspended state"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:707
+msgid "get_diagnostics not implemented for vmwareapi"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:765
+#, python-format
+msgid "Reconfiguring VM instance to set the machine id with ip - %(ip_addr)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:773
+#, python-format
+msgid "Reconfigured VM instance to set the machine id with ip - %(ip_addr)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:810
+#, python-format
+msgid "Creating directory with path %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:814
+#, python-format
+msgid "Created directory with path %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmware_images.py:91
+#, python-format
+msgid "Downloading image %s from glance image server"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmware_images.py:108
+#, python-format
+msgid "Downloaded image %s from glance image server"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmware_images.py:114
+#, python-format
+msgid "Uploading image %s to the Glance image server"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmware_images.py:137
+#, python-format
+msgid "Uploaded image %s to the Glance image server"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmware_images.py:148
+#, python-format
+msgid "Getting image size for the image %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmware_images.py:153
+#, python-format
+msgid "Got image size of %(size)s for the image %(image)s"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:80 nova/virt/xenapi/vmops.py:1432
+#, python-format
+msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:84 nova/virt/xenapi/vmops.py:1436
+#, python-format
+msgid ""
+"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
+"args=%(args)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:89 nova/virt/xenapi/vmops.py:1441
+#, python-format
+msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:99
+#, python-format
+msgid ""
+"The agent call to %(method)s returned an invalid response: %(ret)r. "
+"path=%(path)s; args=%(args)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:109
+#, python-format
+msgid "Failed to query agent version: %(resp)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:121
+msgid "Querying agent version"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:135
+msgid "Reached maximum time attempting to query agent version"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:144
+#, python-format
+msgid "Updating agent to %s"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:151
+#, python-format
+msgid "Failed to update agent: %(resp)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:166
+msgid "Setting admin password"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:176
+#, python-format
+msgid "Failed to exchange keys: %(resp)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:195
+#, python-format
+msgid "Failed to update password: %(resp)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:203
+#, python-format
+msgid "Injecting file path: %r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:215
+#, python-format
+msgid "Failed to inject file: %(resp)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:223
+msgid "Resetting network"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:228
+#, python-format
+msgid "Failed to reset network: %(resp)r"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:248
+msgid ""
+"XenServer tools installed in this image are capable of network injection. "
+"Networking files will not bemanipulated"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:256
+msgid ""
+"XenServer tools are present in this image but are not capable of network "
+"injection"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:260
+msgid "XenServer tools are not installed in this image"
+msgstr ""
+
+#: nova/virt/xenapi/agent.py:312
+#, python-format
+msgid "OpenSSL error: %s"
+msgstr ""
+
+#: nova/virt/xenapi/driver.py:135
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username (optionally),"
+" and xenapi_connection_password to use compute_driver=xenapi.XenAPIDriver"
+msgstr ""
+
+#: nova/virt/xenapi/driver.py:162
+msgid "Failure while cleaning up attached VDIs"
+msgstr ""
+
+#: nova/virt/xenapi/driver.py:355
+#, python-format
+msgid "Could not determine key: %s"
+msgstr ""
+
+#: nova/virt/xenapi/driver.py:565
+msgid "Host startup on XenServer is not supported."
+msgstr ""
+
+#: nova/virt/xenapi/driver.py:617
+msgid "Unable to log in to XenAPI (is the Dom0 disk full?)"
+msgstr ""
+
+#: nova/virt/xenapi/driver.py:655
+msgid "Host is member of a pool, but DB says otherwise"
+msgstr ""
+
+#: nova/virt/xenapi/driver.py:739 nova/virt/xenapi/driver.py:753
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:669 nova/virt/xenapi/fake.py:771
+#: nova/virt/xenapi/fake.py:790 nova/virt/xenapi/fake.py:858
+msgid "Raising NotImplemented"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:671
+#, python-format
+msgid "xenapi.fake does not have an implementation for %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:705
+#, python-format
+msgid "Calling %(localname)s %(impl)s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:710
+#, python-format
+msgid "Calling getter %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:713
+#, python-format
+msgid "Calling setter %s"
+msgstr ""
+
+#: nova/virt/xenapi/fake.py:773
+#, python-format
+msgid ""
+"xenapi.fake does not have an implementation for %s or it has been called "
+"with the wrong number of arguments"
+msgstr ""
+
+#: nova/virt/xenapi/host.py:70
+#, python-format
+msgid ""
+"Instance %(name)s running on %(host)s could not be found in the database: "
+"assuming it is a worker VM and skip ping migration to a new host"
+msgstr ""
+
+#: nova/virt/xenapi/host.py:156
+#, python-format
+msgid "Unable to get SR for this host: %s"
+msgstr ""
+
+#: nova/virt/xenapi/host.py:190
+#, python-format
+msgid "Failed to extract instance support from %s"
+msgstr ""
+
+#: nova/virt/xenapi/host.py:207
+msgid "Unable to get updated status"
+msgstr ""
+
+#: nova/virt/xenapi/host.py:210
+#, python-format
+msgid "The call to %(method)s returned an error: %(e)s."
+msgstr ""
+
+#: nova/virt/xenapi/network_utils.py:29
+#, python-format
+msgid "Found non-unique network for name_label %s"
+msgstr ""
+
+#: nova/virt/xenapi/network_utils.py:47
+#, python-format
+msgid "Found non-unique network for bridge %s"
+msgstr ""
+
+#: nova/virt/xenapi/network_utils.py:49
+#, python-format
+msgid "Found no network for bridge %s"
+msgstr ""
+
+#: nova/virt/xenapi/pool.py:75
+#, python-format
+msgid ""
+"Aggregate %(aggregate_id)s: unrecoverable state during operation on %(host)s"
+msgstr ""
+
+#: nova/virt/xenapi/pool.py:166
+#, python-format
+msgid "Unable to eject %(host)s from the pool; pool not empty"
+msgstr ""
+
+#: nova/virt/xenapi/pool.py:182
+#, python-format
+msgid "Unable to eject %(host)s from the pool; No master found"
+msgstr ""
+
+#: nova/virt/xenapi/pool.py:199
+#, python-format
+msgid "Pool-Join failed: %(e)s"
+msgstr ""
+
+#: nova/virt/xenapi/pool.py:202
+#, python-format
+msgid "Unable to join %(host)s in the pool"
+msgstr ""
+
+#: nova/virt/xenapi/pool.py:218
+#, python-format
+msgid "Pool-eject failed: %(e)s"
+msgstr ""
+
+#: nova/virt/xenapi/pool.py:230
+#, python-format
+msgid "Unable to set up pool: %(e)s."
+msgstr ""
+
+#: nova/virt/xenapi/pool.py:241
+#, python-format
+msgid "Pool-set_name_label failed: %(e)s"
+msgstr ""
+
+#: nova/virt/xenapi/vif.py:104
+#, python-format
+msgid "Found no PIF for device %s"
+msgstr ""
+
+#: nova/virt/xenapi/vif.py:123
+#, python-format
+msgid ""
+"PIF %(pif_rec['uuid'])s for network %(bridge)s has VLAN id %(pif_vlan)d. "
+"Expected %(vlan_num)d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:257
+msgid "Created VM"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:269
+msgid "VM destroyed"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:276
+msgid "VM already halted, skipping shutdown..."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:280
+msgid "Shutting down VM"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:312
+#, python-format
+msgid "VBD not found in instance %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:329
+#, python-format
+msgid "VBD %s already detached"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:332
+#, python-format
+msgid ""
+"VBD %(vbd_ref)s detach rejected, attempt %(num_attempt)d/%(max_attempts)d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:337
+#, python-format
+msgid "Unable to unplug VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:342
+#, python-format
+msgid "Reached maximum number of retries trying to unplug VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:353
+#, python-format
+msgid "Unable to destroy VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:372
+#, python-format
+msgid "Creating %(vbd_type)s-type VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:375
+#, python-format
+msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:391
+#, python-format
+msgid "Unable to destroy VDI %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:423
+#, python-format
+msgid ""
+"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on"
+" %(sr_ref)s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:454
+msgid "SR not present and could not be introduced"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:555
+#, python-format
+msgid "Cloned VDI %(vdi_ref)s from VDI %(vdi_to_clone_ref)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:575
+#, python-format
+msgid "No primary VDI found for %(vm_ref)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:584
+msgid "Starting snapshot for VM"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:632
+#, python-format
+msgid "Destroying cached VDI '%(vdi_uuid)s'"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:690
+#, python-format
+msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:875
+#, python-format
+msgid ""
+"Fast cloning is only supported on default local SR of type ext. SR on this "
+"system was found to be of type %(sr_type)s. Ignoring the cow flag."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:933
+#, python-format
+msgid "Unrecognized cache_images value '%s', defaulting to True"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:967
+#, python-format
+msgid "Fetched VDIs of type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:979
+#, python-format
+msgid ""
+"download_vhd %(image_id)s, attempt %(attempt_num)d/%(max_attempts)d, params:"
+" %(params)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:992
+#, python-format
+msgid "download_vhd failed: %r"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1028
+#, python-format
+msgid "Invalid value '%s' for xenapi_torrent_images"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1039
+#, python-format
+msgid "Asking xapi to fetch vhd image %(image_id)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1103
+#, python-format
+msgid "vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=%(vdi_size_bytes)d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1119
+#, python-format
+msgid ""
+"image_size_bytes=%(size_bytes)d, allowed_size_bytes=%(allowed_size_bytes)d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1123
+#, python-format
+msgid ""
+"Image size %(size_bytes)d exceeded instance_type allowed size "
+"%(allowed_size_bytes)d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1145
+#, python-format
+msgid "Fetching image %(image_id)s, type %(image_type_str)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1158
+#, python-format
+msgid "Size for image %(image_id)s: %(virtual_size)d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1167
+#, python-format
+msgid ""
+"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
+"bytes"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1186
+#, python-format
+msgid "Copying VDI %s to /boot/guest on dom0"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1200
+#, python-format
+msgid "Kernel/Ramdisk VDI %s destroyed"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1209
+msgid "Failed to fetch glance image"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1247
+#, python-format
+msgid "Detected %(image_type_str)s format for image %(image_ref)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1268
+#, python-format
+msgid "Looking up vdi %s for PV kernel"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1286
+#, python-format
+msgid "Unknown image format %(disk_image_type)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1317
+#, python-format
+msgid "VDI %s is still available"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1413
+#, python-format
+msgid "Unable to parse rrd of %(vm_uuid)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1440
+#, python-format
+msgid "Re-scanning SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1468
+#, python-format
+msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1486
+msgid ""
+"XenAPI is unable to find a Storage Repository to install guest instances on."
+" Please check your configuration and/or configure the flag "
+"'sr_matching_filter'"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1499
+msgid "Cannot find SR of content-type ISO"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1507
+#, python-format
+msgid "ISO: looking at SR %(sr_rec)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1509
+msgid "ISO: not iso content"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1512
+msgid "ISO: iso content_type, no 'i18n-key' key"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1515
+msgid "ISO: iso content_type, i18n-key value not 'local-storage-iso'"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1519
+msgid "ISO: SR MATCHing our criteria"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1521
+msgid "ISO: ISO, looking to see if it is host local"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1524
+#, python-format
+msgid "ISO: PBD %(pbd_ref)s disappeared"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1527
+#, python-format
+msgid "ISO: PBD matching, want %(pbd_rec)s, have %(host)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1530
+msgid "ISO: SR with local PBD"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1552
+#, python-format
+msgid ""
+"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: %(server)s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1568
+#, python-format
+msgid "Unable to obtain RRD XML updates with server details: %(server)s."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1622
+#, python-format
+msgid "Invalid statistics data from Xenserver: %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1682
+#, python-format
+msgid "VHD %(vdi_uuid)s has parent %(parent_uuid)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1769
+#, python-format
+msgid ""
+"Parent %(parent_uuid)s doesn't match original parent "
+"%(original_parent_uuid)s, waiting for coalesce..."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1779
+#, python-format
+msgid "VHD coalesce attempts exceeded (%(max_attempts)d), giving up..."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1814
+#, python-format
+msgid "Timeout waiting for device %s to be created"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1834
+#, python-format
+msgid "Disconnecting stale VDI %s from compute domU"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1847
+#, python-format
+msgid "Plugging VBD %s ... "
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1850
+#, python-format
+msgid "Plugging VBD %s done."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1852
+#, python-format
+msgid "VBD %(vbd_ref)s plugged as %(orig_dev)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1855
+#, python-format
+msgid "VBD %(vbd_ref)s plugged into wrong dev, remapping to %(dev)s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1860
+#, python-format
+msgid "Destroying VBD for VDI %s ... "
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1868
+#, python-format
+msgid "Destroying VBD for VDI %s done."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1881
+#, python-format
+msgid "Running pygrub against %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1888
+#, python-format
+msgid "Found Xen kernel %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1890
+msgid "No Xen kernel found. Booting HVM."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1903
+msgid "Partitions:"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1909
+#, python-format
+msgid " %(num)s: %(ptype)s %(size)d sectors"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1934
+#, python-format
+msgid ""
+"Writing partition table %(primary_first)d %(primary_last)d to "
+"%(dev_path)s..."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:1947
+#, python-format
+msgid "Writing partition table %s done."
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:2001
+#, python-format
+msgid ""
+"Starting sparse_copy src=%(src_path)s dst=%(dst_path)s "
+"virtual_size=%(virtual_size)d block_size=%(block_size)d"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:2033
+#, python-format
+msgid ""
+"Finished sparse_copy in %(duration).2f secs, %(compression_pct).2f%% "
+"reduction in size"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:2082
+msgid "Manipulating interface files directly"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:2091
+#, python-format
+msgid "Failed to mount filesystem (expected for non-linux instances): %s"
+msgstr ""
+
+#: nova/virt/xenapi/vm_utils.py:2203
+msgid ""
+"This domU must be running on the host specified by xenapi_connection_url"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:126 nova/virt/xenapi/vmops.py:638
+#, python-format
+msgid "Updating progress to %(progress)d"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:224
+msgid "Starting instance"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:298
+msgid "Removing kernel/ramdisk files from dom0"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:358
+#, python-format
+msgid "Block device information present: %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:381
+msgid "Failed to spawn, rolling back"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:445
+msgid "Detected ISO image type, creating blank VM for install"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:462
+msgid "Auto configuring disk, attempting to resize partition..."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:488
+msgid "Starting VM"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:495
+#, python-format
+msgid ""
+"Latest agent build for %(hypervisor)s/%(os)s/%(architecture)s is %(version)s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:498
+#, python-format
+msgid "No agent build found for %(hypervisor)s/%(os)s/%(architecture)s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:505
+msgid "Waiting for instance state to become running"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:519
+#, python-format
+msgid "Instance agent version: %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:549
+msgid "Setting VCPU weight"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:557
+#, python-format
+msgid "Could not find VM with name %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:607
+msgid "Finished snapshot and upload for VM"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:611
+#, python-format
+msgid "Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:619
+msgid "Failed to transfer vhd to new host"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:655
+#, python-format
+msgid "Resizing down VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:779
+#, python-format
+msgid "Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to %(new_gb)dGB"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:784
+msgid "Resize complete"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:828
+msgid "Starting halted instance found during reboot"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:911
+msgid "Unable to find root VBD/VDI for VM"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:938
+#, python-format
+msgid "Destroying VDIs for Instance %(instance_uuid)s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:966
+msgid "Using RAW or VHD, skipping kernel and ramdisk deletion"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:973
+msgid "instance has a kernel or ramdisk but not both"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:980
+msgid "kernel/ramdisk files removed"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1006
+msgid "Destroying VM"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1032
+msgid "VM is not present, skipping destroy..."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1083
+#, python-format
+msgid "Instance is already in Rescue Mode: %s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1118
+msgid "VM is not present, skipping power off..."
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1154
+#, python-format
+msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1158
+msgid "Automatically hard rebooting"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1257
+msgid "Fetching VM ref while BUILDING failed"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1340
+msgid "Injecting network info to xenstore"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1359
+msgid "Creating vifs"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1368
+#, python-format
+msgid "Creating VIF for network %(network_ref)s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1371
+#, python-format
+msgid "Created VIF %(vif_ref)s, network %(network_ref)s"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1395
+msgid "Injecting hostname to xenstore"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1491
+#, python-format
+msgid ""
+"Destination host:%(hostname)s must be in the same aggregate as the source "
+"server"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1523
+msgid "Migrate Receive failed"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1571
+msgid "VM.assert_can_migratefailed"
+msgstr ""
+
+#: nova/virt/xenapi/vmops.py:1607
+msgid "Migrate Send failed"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:41
+msgid "creating sr within volume_utils"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:44 nova/virt/xenapi/volume_utils.py:72
+#, python-format
+msgid "type is = %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:47 nova/virt/xenapi/volume_utils.py:75
+#, python-format
+msgid "name = %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:60
+#, python-format
+msgid "Created %(label)s as %(sr_ref)s."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:65 nova/virt/xenapi/volume_utils.py:163
+msgid "Unable to create Storage Repository"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:69
+msgid "introducing sr within volume_utils"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:92 nova/virt/xenapi/volume_utils.py:159
+#: nova/virt/xenapi/volumeops.py:151
+#, python-format
+msgid "Introduced %(label)s as %(sr_ref)s."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:95
+msgid "Creating pbd for SR"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:97
+msgid "Plugging SR"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:105 nova/virt/xenapi/volumeops.py:155
+msgid "Unable to introduce Storage Repository"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:116 nova/virt/xenapi/volumeops.py:46
+msgid "Unable to get SR using uuid"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:118
+#, python-format
+msgid "Forgetting SR %s..."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:126
+msgid "Unable to forget Storage Repository"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:146
+#, python-format
+msgid "Introducing %s..."
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:175
+#, python-format
+msgid "Unable to find SR from VBD %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:193
+#, python-format
+msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:199
+#, python-format
+msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:223
+#, python-format
+msgid "Unable to introduce VDI on SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:231
+#, python-format
+msgid "Unable to get record of VDI %s on"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:253
+#, python-format
+msgid "Unable to introduce VDI for SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:263
+#, python-format
+msgid "Error finding vdis in SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:270
+#, python-format
+msgid "Unable to find vbd for vdi %s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:281
+#, python-format
+msgid "Unable to obtain target information %(mountpoint)s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:309
+#, python-format
+msgid "Unable to obtain target information %(connection_data)s"
+msgstr ""
+
+#: nova/virt/xenapi/volume_utils.py:335
+#, python-format
+msgid "Mountpoint cannot be translated: %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:62
+msgid "Could not find VDI ref"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:67
+#, python-format
+msgid "Creating SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:70
+msgid "Could not create SR"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:73
+msgid "Could not retrieve SR record"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:78
+#, python-format
+msgid "Introducing SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:81
+msgid "SR found in xapi database. No need to introduce"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:86
+msgid "Could not introduce SR"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:90
+#, python-format
+msgid "Checking for SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:100
+#, python-format
+msgid "SR %s not found in the xapi database"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:106
+msgid "Could not forget SR"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:173
+#, python-format
+msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:183
+#, python-format
+msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:194
+#, python-format
+msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:204
+#, python-format
+msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:212
+#, python-format
+msgid "Unable to locate volume %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:221
+#, python-format
+msgid "Unable to detach volume %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:226
+#, python-format
+msgid "Unable to destroy vbd %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:233
+#, python-format
+msgid "Error purging SR %s"
+msgstr ""
+
+#: nova/virt/xenapi/volumeops.py:235
+#, python-format
+msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
+msgstr ""
+
+#: nova/vnc/xvp_proxy.py:96 nova/vnc/xvp_proxy.py:101
+#, python-format
+msgid "Error in handshake: %s"
+msgstr ""
+
+#: nova/vnc/xvp_proxy.py:117
+#, python-format
+msgid "Invalid request: %s"
+msgstr ""
+
+#: nova/vnc/xvp_proxy.py:137
+#, python-format
+msgid "Request: %s"
+msgstr ""
+
+#: nova/vnc/xvp_proxy.py:140
+#, python-format
+msgid "Request made with missing token: %s"
+msgstr ""
+
+#: nova/vnc/xvp_proxy.py:150
+#, python-format
+msgid "Request made with invalid token: %s"
+msgstr ""
+
+#: nova/vnc/xvp_proxy.py:157
+#, python-format
+msgid "Unexpected error: %s"
+msgstr ""
+
+#: nova/vnc/xvp_proxy.py:177
+#, python-format
+msgid "Starting nova-xvpvncproxy node (version %s)"
+msgstr ""
+
+#: nova/volume/api.py:109
+#, python-format
+msgid "Volume size '%s' must be an integer and greater than 0"
+msgstr ""
+
+#: nova/volume/api.py:128
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to create %(size)sG volume (%(consumed)dG "
+"of %(quota)dG already consumed)"
+msgstr ""
+
+#: nova/volume/api.py:134
+#, python-format
+msgid ""
+"Quota exceeded for %(pid)s, tried to create volume (%(consumed)d volumes "
+"already consumed)"
+msgstr ""
+
+#: nova/volume/api.py:145
+msgid "Size of specified image is larger than volume size."
+msgstr ""
+
+#: nova/volume/api.py:215
+msgid "Failed to update quota for deleting volume."
+msgstr ""
+
+#: nova/volume/api.py:223
+msgid "Volume status must be available or error"
+msgstr ""
+
+#: nova/volume/api.py:228
+#, python-format
+msgid "Volume still has %d dependent snapshots"
+msgstr ""
+
+#: nova/volume/api.py:395
+msgid "must be available"
+msgstr ""
+
+#: nova/volume/api.py:428
+msgid "Volume Snapshot status must be available or error"
+msgstr ""
+
+#: nova/volume/api.py:479
+msgid "Volume status must be available/in-use."
+msgstr ""
+
+#: nova/volume/api.py:482
+msgid "Volume status is in-use."
+msgstr ""
+
+#: nova/volume/cinder.py:68
+#, python-format
+msgid "Cinderclient connection created using URL: %s"
+msgstr ""
+
+#: nova/volume/driver.py:103 nova/volume/netapp_nfs.py:255
+#, python-format
+msgid "Recovering from a failed execute. Try number %s"
+msgstr ""
+
+#: nova/volume/driver.py:113
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: nova/volume/driver.py:324
+#, python-format
+msgid "Skipping ensure_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: nova/volume/driver.py:397
+#, python-format
+msgid "Skipping remove_export. No iscsi_target provisioned for volume: %s"
+msgstr ""
+
+#: nova/volume/driver.py:414
+#, python-format
+msgid ""
+"Skipping remove_export. No iscsi_target is presently exported for volume: %s"
+msgstr ""
+
+#: nova/volume/driver.py:423
+msgid "ISCSI provider_location not stored, using discovery"
+msgstr ""
+
+#: nova/volume/driver.py:470
+#, python-format
+msgid "Could not find iSCSI export for volume %s"
+msgstr ""
+
+#: nova/volume/driver.py:474
+#, python-format
+msgid "ISCSI Discovery: Found %s"
+msgstr ""
+
+#: nova/volume/driver.py:565
+#, python-format
+msgid "Cannot confirm exported volume id:%(volume_id)s."
+msgstr ""
+
+#: nova/volume/driver.py:606
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: nova/volume/driver.py:618
+#, python-format
+msgid "rbd has no pool %s"
+msgstr ""
+
+#: nova/volume/driver.py:740
+#, python-format
+msgid "Image %s is not stored in rbd"
+msgstr ""
+
+#: nova/volume/driver.py:744
+#, python-format
+msgid "Image %s has blank components"
+msgstr ""
+
+#: nova/volume/driver.py:747
+#, python-format
+msgid "Image %s is not an rbd snapshot"
+msgstr ""
+
+#: nova/volume/driver.py:762
+#, python-format
+msgid "%s is in a different ceph cluster"
+msgstr ""
+
+#: nova/volume/driver.py:773
+#, python-format
+msgid "Unable to read image %s"
+msgstr ""
+
+#: nova/volume/driver.py:815
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr ""
+
+#: nova/volume/driver.py:820
+msgid "Sheepdog is not working"
+msgstr ""
+
+#: nova/volume/driver.py:924 nova/volume/driver.py:929
+#, python-format
+msgid "LoggingVolumeDriver: %s"
+msgstr ""
+
+#: nova/volume/iscsi.py:122
+#, python-format
+msgid "Creating volume: %s"
+msgstr ""
+
+#: nova/volume/iscsi.py:136
+#, python-format
+msgid "Failed to create iscsi target for volume id:%(vol_id)s."
+msgstr ""
+
+#: nova/volume/iscsi.py:146
+#, python-format
+msgid ""
+"Failed to create iscsi target for volume id:%(vol_id)s. Please ensure your "
+"tgtd config file contains 'include %(volumes_dir)s/*'"
+msgstr ""
+
+#: nova/volume/iscsi.py:154 nova/volume/iscsi.py:196
+#, python-format
+msgid "Removing volume: %s"
+msgstr ""
+
+#: nova/volume/iscsi.py:168
+#, python-format
+msgid "Failed to create iscsi target for volume id:%(volume_id)s."
+msgstr ""
+
+#: nova/volume/iscsi.py:177
+msgid "valid iqn needed for show_target"
+msgstr ""
+
+#: nova/volume/manager.py:102
+#, python-format
+msgid "Re-exporting %s volumes"
+msgstr ""
+
+#: nova/volume/manager.py:107
+#, python-format
+msgid "volume %s: skipping export"
+msgstr ""
+
+#: nova/volume/manager.py:109
+msgid "Resuming any in progress delete operations"
+msgstr ""
+
+#: nova/volume/manager.py:112
+#, python-format
+msgid "Resuming delete on volume: %s"
+msgstr ""
+
+#: nova/volume/manager.py:121
+#, python-format
+msgid "volume %s: creating"
+msgstr ""
+
+#: nova/volume/manager.py:136
+#, python-format
+msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG"
+msgstr ""
+
+#: nova/volume/manager.py:159
+#, python-format
+msgid "volume %s: creating export"
+msgstr ""
+
+#: nova/volume/manager.py:172
+#, python-format
+msgid "volume %s: created successfully"
+msgstr ""
+
+#: nova/volume/manager.py:190
+msgid "Volume is not local to this node"
+msgstr ""
+
+#: nova/volume/manager.py:195
+#, python-format
+msgid "volume %s: removing export"
+msgstr ""
+
+#: nova/volume/manager.py:197
+#, python-format
+msgid "volume %s: deleting"
+msgstr ""
+
+#: nova/volume/manager.py:200
+#, python-format
+msgid "volume %s: volume is busy"
+msgstr ""
+
+#: nova/volume/manager.py:217
+msgid "Failed to update usages deleting volume"
+msgstr ""
+
+#: nova/volume/manager.py:220
+#, python-format
+msgid "volume %s: deleted successfully"
+msgstr ""
+
+#: nova/volume/manager.py:233
+#, python-format
+msgid "snapshot %s: creating"
+msgstr ""
+
+#: nova/volume/manager.py:237
+#, python-format
+msgid "snapshot %(snap_name)s: creating"
+msgstr ""
+
+#: nova/volume/manager.py:252
+#, python-format
+msgid "snapshot %s: created successfully"
+msgstr ""
+
+#: nova/volume/manager.py:261
+#, python-format
+msgid "snapshot %s: deleting"
+msgstr ""
+
+#: nova/volume/manager.py:264
+#, python-format
+msgid "snapshot %s: snapshot is busy"
+msgstr ""
+
+#: nova/volume/manager.py:276
+#, python-format
+msgid "snapshot %s: deleted successfully"
+msgstr ""
+
+#: nova/volume/manager.py:323
+#, python-format
+msgid "Downloaded image %(image_id)s to %(volume_id)s successfully"
+msgstr ""
+
+#: nova/volume/manager.py:342
+#, python-format
+msgid "Uploaded volume %(volume_id)s to image (%(image_id)s) successfully"
+msgstr ""
+
+#: nova/volume/manager.py:426
+msgid "Checking volume capabilities"
+msgstr ""
+
+#: nova/volume/manager.py:430
+#, python-format
+msgid "New capabilities found: %s"
+msgstr ""
+
+#: nova/volume/manager.py:441
+msgid "Clear capabilities"
+msgstr ""
+
+#: nova/volume/manager.py:445
+#, python-format
+msgid "Notification {%s} received"
+msgstr ""
+
+#: nova/volume/netapp.py:108
+#, python-format
+msgid "API %(name)s failed: %(reason)s"
+msgstr ""
+
+#: nova/volume/netapp.py:119 nova/volume/netapp.py:1033
+#, python-format
+msgid "Using WSDL: %s"
+msgstr ""
+
+#: nova/volume/netapp.py:129
+#, python-format
+msgid "Using DFM server: %s"
+msgstr ""
+
+#: nova/volume/netapp.py:134
+#, python-format
+msgid "Using storage service: %s"
+msgstr ""
+
+#: nova/volume/netapp.py:139
+#, python-format
+msgid "Using storage service prefix: %s"
+msgstr ""
+
+#: nova/volume/netapp.py:145
+#, python-format
+msgid "Using vfiler: %s"
+msgstr ""
+
+#: nova/volume/netapp.py:154 nova/volume/netapp.py:1048
+#: nova/volume/netapp_nfs.py:110 nova/volume/storwize_svc.py:326
+#, python-format
+msgid "%s is not set"
+msgstr ""
+
+#: nova/volume/netapp.py:157
+msgid ""
+"Either netapp_storage_service or netapp_storage_service_prefix must be set"
+msgstr ""
+
+#: nova/volume/netapp.py:184
+msgid "Connected to DFM server"
+msgstr ""
+
+#: nova/volume/netapp.py:262
+#, python-format
+msgid "Discovered %(dataset_count)s datasets and %(lun_count)s LUNs"
+msgstr ""
+
+#: nova/volume/netapp.py:297
+#, python-format
+msgid "Job failed: %s"
+msgstr ""
+
+#: nova/volume/netapp.py:326
+msgid ""
+"Attempt to use volume_type without specifying netapp_storage_service_prefix "
+"flag."
+msgstr ""
+
+#: nova/volume/netapp.py:330
+msgid ""
+"You must set the netapp_storage_service flag in order to create volumes with"
+" no volume_type."
+msgstr ""
+
+#: nova/volume/netapp.py:399
+msgid "Failed to provision dataset member"
+msgstr ""
+
+#: nova/volume/netapp.py:414
+msgid "No LUN was created by the provision job"
+msgstr ""
+
+#: nova/volume/netapp.py:452
+msgid "Failed to remove and delete dataset member"
+msgstr ""
+
+#: nova/volume/netapp.py:493
+#, python-format
+msgid "No entry in LUN table for volume %s"
+msgstr ""
+
+#: nova/volume/netapp.py:513
+#, python-format
+msgid "Failed to get LUN details for LUN ID %s"
+msgstr ""
+
+#: nova/volume/netapp.py:530
+#, python-format
+msgid "Failed to get host details for host ID %s"
+msgstr ""
+
+#: nova/volume/netapp.py:767 nova/volume/netapp.py:814
+#, python-format
+msgid "No LUN ID for volume %s"
+msgstr ""
+
+#: nova/volume/netapp.py:776
+#, python-format
+msgid "Failed to get target portal for filer: %s"
+msgstr ""
+
+#: nova/volume/netapp.py:781
+#, python-format
+msgid "Failed to get target IQN for filer: %s"
+msgstr ""
+
+#: nova/volume/netapp.py:968 nova/volume/netapp_nfs.py:74
+#, python-format
+msgid ""
+"Cannot create volume of size %(vol_size)s from snapshot of size "
+"%(snap_size)s"
+msgstr ""
+
+#: nova/volume/netapp.py:980
+#, python-format
+msgid ""
+"Cannot create volume of type %(new_type)s from snapshot of type %(old_type)s"
+msgstr ""
+
+#: nova/volume/netapp.py:1014
+#, python-format
+msgid "No metadata property %(prop)s defined for the LUN %(name)s"
+msgstr ""
+
+#: nova/volume/netapp.py:1078
+msgid "Success getting LUN list from server"
+msgstr ""
+
+#: nova/volume/netapp.py:1100
+#, python-format
+msgid "Created LUN with name %s"
+msgstr ""
+
+#: nova/volume/netapp.py:1109 nova/volume/netapp.py:1217
+#, python-format
+msgid "Destroyed LUN %s"
+msgstr ""
+
+#: nova/volume/netapp.py:1146
+#, python-format
+msgid "Mapped LUN %(handle)s to the initiator %(initiator_name)s"
+msgstr ""
+
+#: nova/volume/netapp.py:1151
+#, python-format
+msgid ""
+"Succesfully fetched target details for LUN %(handle)s and initiator "
+"%(initiator_name)s"
+msgstr ""
+
+#: nova/volume/netapp.py:1156
+#, python-format
+msgid "Failed to get LUN target details for the LUN %s"
+msgstr ""
+
+#: nova/volume/netapp.py:1160
+#, python-format
+msgid "Failed to get target portal for the LUN %s"
+msgstr ""
+
+#: nova/volume/netapp.py:1164
+#, python-format
+msgid "Failed to get target IQN for the LUN %s"
+msgstr ""
+
+#: nova/volume/netapp.py:1197
+#, python-format
+msgid "Unmapped LUN %(handle)s from the initiator %(initiator_name)s"
+msgstr ""
+
+#: nova/volume/netapp.py:1253
+msgid "Object is not a NetApp LUN."
+msgstr ""
+
+#: nova/volume/netapp.py:1263
+#, python-format
+msgid "Cloned LUN with new name %s"
+msgstr ""
+
+#: nova/volume/netapp.py:1280
+#, python-format
+msgid "Could not find handle for LUN named %s"
+msgstr ""
+
+#: nova/volume/nfs.py:60
+msgid "There's no NFS config file configured "
+msgstr ""
+
+#: nova/volume/nfs.py:62
+msgid "NFS config file doesn't exist"
+msgstr ""
+
+#: nova/volume/nfs.py:85
+#, python-format
+msgid "casted to %s"
+msgstr ""
+
+#: nova/volume/nfs.py:95
+#, python-format
+msgid "Volume %s does not have provider_location specified, skipping"
+msgstr ""
+
+#: nova/volume/nfs.py:106
+#, python-format
+msgid ""
+"Trying to delete non-existing volume %(volume)s at path %(mounted_path)s"
+msgstr ""
+
+#: nova/volume/san.py:116 nova/volume/san.py:156
+msgid "Specify san_password or san_private_key"
+msgstr ""
+
+#: nova/volume/san.py:160
+msgid "san_ip must be set"
+msgstr ""
+
+#: nova/volume/san.py:230
+#, python-format
+msgid "Cannot parse list-view output: %s"
+msgstr ""
+
+#: nova/volume/san.py:324
+#, python-format
+msgid "LUID not found for %(zfs_poolname)s. Output=%(out)s"
+msgstr ""
+
+#: nova/volume/san.py:457
+#, python-format
+msgid "CLIQ command returned %s"
+msgstr ""
+
+#: nova/volume/san.py:463
+#, python-format
+msgid ""
+"Malformed response to CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
+msgstr ""
+
+#: nova/volume/san.py:471
+#, python-format
+msgid "Error running CLIQ command %(verb)s %(cliq_args)s. Result=%(out)s"
+msgstr ""
+
+#: nova/volume/san.py:501
+#, python-format
+msgid ""
+"Unexpected number of virtual ips for cluster %(cluster_name)s. "
+"Result=%(_xml)s"
+msgstr ""
+
+#: nova/volume/san.py:554
+#, python-format
+msgid "Volume info: %(volume_name)s => %(volume_attributes)s"
+msgstr ""
+
+#: nova/volume/san.py:610
+msgid "local_path not supported"
+msgstr ""
+
+#: nova/volume/solidfire.py:123
+#, python-format
+msgid "Payload for SolidFire API call: %s"
+msgstr ""
+
+#: nova/volume/solidfire.py:140
+#, python-format
+msgid "Call to json.loads() raised an exception: %s"
+msgstr ""
+
+#: nova/volume/solidfire.py:145
+#, python-format
+msgid "Results of SolidFire API call: %s"
+msgstr ""
+
+#: nova/volume/solidfire.py:159
+#, python-format
+msgid "Found solidfire account: %s"
+msgstr ""
+
+#: nova/volume/solidfire.py:173
+#, python-format
+msgid "solidfire account: %s does not exist, create it..."
+msgstr ""
+
+#: nova/volume/solidfire.py:279
+#, python-format
+msgid "More than one valid preset was detected, using %s"
+msgstr ""
+
+#: nova/volume/solidfire.py:306
+msgid "Enter SolidFire delete_volume..."
+msgstr ""
+
+#: nova/volume/solidfire.py:334
+#, python-format
+msgid "Deleting volumeID: %s"
+msgstr ""
+
+#: nova/volume/solidfire.py:342
+msgid "Leaving SolidFire delete_volume"
+msgstr ""
+
+#: nova/volume/solidfire.py:345
+msgid "Executing SolidFire ensure_export..."
+msgstr ""
+
+#: nova/volume/solidfire.py:349
+msgid "Executing SolidFire create_export..."
+msgstr ""
+
+#: nova/volume/solidfire.py:354
+msgid "Enter SolidFire create_snapshot..."
+msgstr ""
+
+#: nova/volume/storwize_svc.py:123
+#, python-format
+msgid ""
+"_get_hdr_dic: attribute headers and values do not match.\n"
+" Headers: %(header)s\n"
+" Values: %(row)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:141
+msgid "enter: check_for_setup_error"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:147 nova/volume/storwize_svc.py:163
+#: nova/volume/storwize_svc.py:171 nova/volume/storwize_svc.py:218
+#: nova/volume/storwize_svc.py:227
+#, python-format
+msgid ""
+"check_for_setup_error: failed with unexpected CLI output.\n"
+" Command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:155
+#, python-format
+msgid "pool %s doesn't exist"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:182 nova/volume/storwize_svc.py:238
+#, python-format
+msgid ""
+"check_for_setup_error: failed with unexpected CLI output.\n"
+" Command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:200
+#, python-format
+msgid "Did not find expected column name in svcinfo lsnode: %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:203 nova/volume/storwize_svc.py:253
+#, python-format
+msgid ""
+"check_for_setup_error: Unexpected CLI output.\n"
+" Details: %(msg)s\n"
+"Command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:250
+#, python-format
+msgid "Did not find expected column name in lsportip: %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:272
+#, python-format
+msgid ""
+"check_for_setup_error: fail to storage configuration: unknown storage node %(node_id)s from CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:294
+#, python-format
+msgid ""
+"check_for_setup_error: fail to storage configuration: storage node %s has no"
+" IP addresses configured"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:302
+#, python-format
+msgid ""
+"could not obtain IP address and iSCSI name from the storage. Please verify that the storage is configured for iSCSI.\n"
+" Storage nodes: %(nodes)s\n"
+" portips: %(portips)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:310
+msgid "leave: check_for_setup_error"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:331
+msgid ""
+"Password or SSH private key is required for authentication: set either "
+"san_password or san_private_key option"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:339
+msgid ""
+"Illegal value specified for storwize_svc_vol_rsize: set to either a number "
+"or a percentage"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:346
+msgid ""
+"Illegal value specified for storwize_svc_vol_warning: set to either a number"
+" or a percentage"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:354
+msgid ""
+"Illegal value specified for storwize_svc_vol_grainsize: set to either '32', "
+"'64', '128', or '256'"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:363
+#, python-format
+msgid ""
+"Illegal value %s specified for storwize_svc_flashcopy_timeout: valid values "
+"are between 0 and 600"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:373
+msgid ""
+"If compression is set to True, rsize must also be set (not equal to -1)"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:378
+msgid "enter: do_setup"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:380
+msgid "leave: do_setup"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:392
+#, python-format
+msgid "enter: create_volume: volume %s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:429 nova/volume/storwize_svc.py:439
+#, python-format
+msgid ""
+"create volume %(name)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:444
+#, python-format
+msgid "leave: create_volume: volume %(name)s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:453
+#, python-format
+msgid "enter: delete_volume: volume %(name)s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:468
+#, python-format
+msgid ""
+"delete volume %(name)s - non empty output from CLI.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:475
+#, python-format
+msgid "warning: tried to delete volume %(name)s but it does not exist."
+msgstr ""
+
+#: nova/volume/storwize_svc.py:478
+#, python-format
+msgid "leave: delete_volume: volume %(name)s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:488
+#, python-format
+msgid "ensure_export: volume %s not found on storage"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:512
+#, python-format
+msgid "enter: initialize_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:527
+msgid "_create_new_host failed to return the host name."
+msgstr ""
+
+#: nova/volume/storwize_svc.py:545
+#, python-format
+msgid ""
+"initialize_connection: did not find preferred node %(node)s for volume "
+"%(vol)s in iSCSI configuration"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:553
+#, python-format
+msgid ""
+"initialize_connection: did not find a preferred node for volume %s in iSCSI "
+"configuration"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:567
+#, python-format
+msgid ""
+"leave: initialize_connection:\n"
+" volume: %(vol)s\n"
+" connector %(conn)s\n"
+" properties: %(prop)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:585
+#, python-format
+msgid "enter: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:595
+#, python-format
+msgid ""
+"_get_host_from_iscsiname failed to return the host name for iscsi name %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:606
+#, python-format
+msgid ""
+"delete mapping of volume %(vol)s to host %(host)s - non empty output from CLI.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:615
+#, python-format
+msgid ""
+"terminate_connection: no mapping of volume %(vol)s to host %(host)s found"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:623
+#, python-format
+msgid "leave: terminate_connection: volume %(vol)s with connector %(conn)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:634
+#, python-format
+msgid ""
+"_run_flashcopy: fail to cleanup failed FlashCopy mapping %(fc_map_id)% from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:648
+#, python-format
+msgid ""
+"enter: _run_flashcopy: execute FlashCopy from source %(source)s to target "
+"%(target)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:656 nova/volume/storwize_svc.py:669
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:680 nova/volume/storwize_svc.py:689
+#, python-format
+msgid ""
+"create FC mapping from %(source)s to %(target)s - did not find mapping id in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:700
+#, python-format
+msgid ""
+"_run_flashcopy: fail to prepare FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:725
+#, python-format
+msgid ""
+"unexecpted mapping status %(status)s for mapping %(id)s. Attributes: "
+"%(attr)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:737
+#, python-format
+msgid ""
+"mapping %(id)s prepare failed to complete within the alloted %(to)s seconds "
+"timeout. Terminating"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:741
+#, python-format
+msgid ""
+"_run_flashcopy: fail to start FlashCopy from %(source)s to %(target)s with "
+"exception %(ex)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:749
+#, python-format
+msgid "_run_flashcopy: %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:755
+#, python-format
+msgid ""
+"_run_flashcopy: fail to start FlashCopy from %(source)s to %(target)s.\n"
+"stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:764
+#, python-format
+msgid "leave: _run_flashcopy: FlashCopy started from %(source)s to %(target)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:774
+#, python-format
+msgid ""
+"enter: create_volume_from_snapshot: snapshot %(tgt)s from volume %(src)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:780
+#, python-format
+msgid "create_volume_from_snapshot: source volume %s does not exist"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:787 nova/volume/storwize_svc.py:841
+#: nova/volume/storwize_svc.py:860
+#, python-format
+msgid ""
+"create_volume_from_snapshot: cannot get source volume %(src)s capacity from "
+"volume attributes %(attr)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:796
+#, python-format
+msgid ""
+"create_volume_from_snapshot: target volume %s already exists, cannot create"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:815
+#, python-format
+msgid "leave: create_volume_from_snapshot: %s created successfully"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:827
+#, python-format
+msgid "enter: create_snapshot: snapshot %(tgt)s from volume %(src)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:834
+#, python-format
+msgid "create_snapshot: source volume %s does not exist"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:867
+#, python-format
+msgid ""
+"create_snapshot: source %(src)s and target volume %(tgt)s have different "
+"capacities (source:%(ssize)s target:%(tsize)s)"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:885
+#, python-format
+msgid "leave: create_snapshot: %s created successfully"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:893
+#, python-format
+msgid "enter: delete_snapshot: snapshot %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:902
+#, python-format
+msgid "leave: delete_snapshot: snapshot %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:911
+#, python-format
+msgid "enter: _get_host_from_iscsiname: iSCSI initiator %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:921
+#, python-format
+msgid ""
+"_get_host_from_iscsiname: failed with unexpected CLI output.\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:941
+#, python-format
+msgid ""
+"_get_host_from_iscsiname: Unexpected response from CLI output. Command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:957
+#, python-format
+msgid "leave: _get_host_from_iscsiname: iSCSI initiator %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:970
+#, python-format
+msgid "enter: _create_new_host: host %(name)s with iSCSI initiator %(init)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:979
+msgid ""
+"_create_new_host: cannot clean host name. Host name is not unicode or string"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:992
+#, python-format
+msgid ""
+"create host %(name)s with iSCSI initiator %(init)s - did not find success message in CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1000
+#, python-format
+msgid "leave: _create_new_host: host %(host)s with iSCSI initiator %(init)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1009
+#, python-format
+msgid "enter: _delete_host: host %s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1017
+#, python-format
+msgid "warning: tried to delete host %(name)s but it does not exist."
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1020
+#, python-format
+msgid "leave: _delete_host: host %s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1024
+#, python-format
+msgid "enter: _is_volume_defined: volume %s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1026
+#, python-format
+msgid "leave: _is_volume_defined: volume %(vol)s with %(str)s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1037
+#, python-format
+msgid "enter: _is_host_defined: host %s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1049
+#, python-format
+msgid ""
+"_is_host_defined: Unexpected response from CLI output.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1059
+#, python-format
+msgid ""
+"Data received for host %(host1)s instead of host %(host2)s.\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1069
+#, python-format
+msgid "leave: _is_host_defined: host %(host)s with %(str)s "
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1097
+#, python-format
+msgid "enter: _map_vol_to_host: volume %(vol)s to host %(host)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1127
+#, python-format
+msgid ""
+"_map_vol_to_host: mapping host %(host)s to volume %(vol)s with LUN %(lun)s - did not find success message in CLI output. stdout: %(out)s\n"
+" stderr: %(err)s\n"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1137
+#, python-format
+msgid "leave: _map_vol_to_host: LUN %(lun)s, volume %(vol)s, host %(host)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1153
+#, python-format
+msgid "enter: _get_flashcopy_mapping_attributes: mapping %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1160 nova/volume/storwize_svc.py:1172
+#, python-format
+msgid ""
+"_get_flashcopy_mapping_attributes: Unexpected response from CLI output. Command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1184
+#, python-format
+msgid ""
+"leave: _get_flashcopy_mapping_attributes: mapping %(id)s, attributes "
+"%(attr)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1198
+#, python-format
+msgid "enter: _get_volume_attributes: volume %s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1207
+#, python-format
+msgid ""
+"CLI Exception output:\n"
+" command: %(cmd)s\n"
+" stdout: %(out)s\n"
+" stderr: %(err)s"
+msgstr ""
+
+#: nova/volume/storwize_svc.py:1228
+#, python-format
+msgid ""
+"leave: _get_volume_attributes:\n"
+" volume %(vol)s\n"
+" attributes: %(attr)s"
+msgstr ""
+
+#: nova/volume/volume_types.py:49 nova/volume/volume_types.py:108
+msgid "name cannot be None"
+msgstr ""
+
+#: nova/volume/volume_types.py:96
+msgid "id cannot be None"
+msgstr ""
+
+#: nova/volume/xensm.py:55
+#, python-format
+msgid "SR name = %s"
+msgstr ""
+
+#: nova/volume/xensm.py:56
+#, python-format
+msgid "Params: %s"
+msgstr ""
+
+#: nova/volume/xensm.py:60
+#, python-format
+msgid "Failed to create sr %s...continuing"
+msgstr ""
+
+#: nova/volume/xensm.py:62
+msgid "Create failed"
+msgstr ""
+
+#: nova/volume/xensm.py:65
+#, python-format
+msgid "SR UUID of new SR is: %s"
+msgstr ""
+
+#: nova/volume/xensm.py:72
+msgid "Failed to update db"
+msgstr ""
+
+#: nova/volume/xensm.py:82
+#, python-format
+msgid "Failed to introduce sr %s...continuing"
+msgstr ""
+
+#: nova/volume/xensm.py:93
+#, python-format
+msgid "Failed to reach backend %d"
+msgstr ""
+
+#: nova/volume/xensm.py:102
+#, python-format
+msgid "XenSMDriver requires xenapi connection, using %s"
+msgstr ""
+
+#: nova/volume/xensm.py:114
+msgid "Failed to initiate session"
+msgstr ""
+
+#: nova/volume/xensm.py:147
+#, python-format
+msgid "Volume will be created in backend - %d"
+msgstr ""
+
+#: nova/volume/xensm.py:159
+msgid "Failed to update volume in db"
+msgstr ""
+
+#: nova/volume/xensm.py:163
+msgid "Unable to create volume"
+msgstr ""
+
+#: nova/volume/xensm.py:170
+#, python-format
+msgid "Volume %s does not exist"
+msgstr ""
+
+#: nova/volume/xensm.py:180
+msgid "Failed to delete vdi"
+msgstr ""
+
+#: nova/volume/xensm.py:187
+msgid "Failed to delete volume in db"
+msgstr ""
+
+#: nova/volume/xensm.py:221
+msgid "Failed to find volume in db"
+msgstr ""
+
+#: nova/volume/xensm.py:233
+msgid "Failed to find backend in db"
+msgstr ""
+
+#: nova/volume/nexenta/__init__.py:27
+msgid "Nexenta SA returned the error"
+msgstr ""
+
+#: nova/volume/nexenta/jsonrpc.py:64
+#, python-format
+msgid "Sending JSON data: %s"
+msgstr ""
+
+#: nova/volume/nexenta/jsonrpc.py:69
+#, python-format
+msgid "Auto switching to HTTPS connection to %s"
+msgstr ""
+
+#: nova/volume/nexenta/jsonrpc.py:75
+msgid "No headers in server response"
+msgstr ""
+
+#: nova/volume/nexenta/jsonrpc.py:76
+msgid "Bad response from server"
+msgstr ""
+
+#: nova/volume/nexenta/jsonrpc.py:79
+#, python-format
+msgid "Got response: %s"
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:96
+#, python-format
+msgid "Volume %s does not exist in Nexenta SA"
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:180
+msgid ""
+"Call to local_path should not happen. Verify that use_local_volumes flag is "
+"turned off."
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:202
+#, python-format
+msgid "Ignored target creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:210
+#, python-format
+msgid "Ignored target group creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:219
+#, python-format
+msgid "Ignored target group member addition error \"%s\" while ensuring export"
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:227
+#, python-format
+msgid "Ignored LU creation error \"%s\" while ensuring export"
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:237
+#, python-format
+msgid "Ignored LUN mapping entry addition error \"%s\" while ensuring export"
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:273
+#, python-format
+msgid ""
+"Got error trying to destroy target group %(target_group)s, assuming it is "
+"already gone: %(exc)s"
+msgstr ""
+
+#: nova/volume/nexenta/volume.py:280
+#, python-format
+msgid ""
+"Got error trying to delete target %(target)s, assuming it is already gone: "
+"%(exc)s"
+msgstr ""
diff --git a/nova/manager.py b/nova/manager.py
index 7df63f719..7c7cbeb67 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -63,7 +63,6 @@ from nova.openstack.common import log as logging
from nova.openstack.common.plugin import pluginmanager
from nova.openstack.common.rpc import dispatcher as rpc_dispatcher
from nova.scheduler import rpcapi as scheduler_rpcapi
-from nova import version
periodic_opts = [
@@ -269,15 +268,6 @@ class Manager(base.Base):
"""
pass
- def service_version(self, context):
- return version.version_string()
-
- def service_config(self, context):
- config = {}
- for key in CONF:
- config[key] = CONF.get(key, None)
- return config
-
class SchedulerDependentManager(Manager):
"""Periodically send capability updates to the Scheduler services.
diff --git a/nova/network/api.py b/nova/network/api.py
index 59172d9ec..8a173ba45 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -22,10 +22,12 @@ import inspect
from nova.db import base
from nova import exception
+from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import log as logging
from nova import policy
+from nova import utils
LOG = logging.getLogger(__name__)
@@ -52,7 +54,7 @@ def refresh_cache(f):
raise Exception(msg)
update_instance_cache_with_nw_info(self, context, instance,
- nw_info=res)
+ nw_info=res, conductor_api=kwargs.get('conductor_api'))
# return the original function's return value
return res
@@ -60,8 +62,7 @@ def refresh_cache(f):
def update_instance_cache_with_nw_info(api, context, instance,
- nw_info=None):
-
+ nw_info=None, conductor_api=None):
try:
if not isinstance(nw_info, network_model.NetworkInfo):
nw_info = None
@@ -69,7 +70,10 @@ def update_instance_cache_with_nw_info(api, context, instance,
nw_info = api._get_instance_nw_info(context, instance)
# update cache
cache = {'network_info': nw_info.json()}
- api.db.instance_info_cache_update(context, instance['uuid'], cache)
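+        # Route the update through the conductor when one is supplied so
+        # that services without direct database access can still refresh
+        # the cache.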
+ if conductor_api:
+ conductor_api.instance_info_cache_update(context, instance, cache)
+ else:
+ api.db.instance_info_cache_update(context, instance['uuid'], cache)
except Exception:
LOG.exception(_('Failed storing info cache'), instance=instance)
@@ -101,20 +105,26 @@ class API(base.Base):
This is a pluggable module - other implementations do networking via
other services (such as Quantum).
"""
-
_sentinel = object()
def __init__(self, **kwargs):
self.network_rpcapi = network_rpcapi.NetworkAPI()
+ helper = utils.ExceptionHelper
+ # NOTE(vish): this local version of floating_manager has to convert
+ # ClientExceptions back since they aren't going over rpc.
+ self.floating_manager = helper(floating_ips.LocalManager())
super(API, self).__init__(**kwargs)
@wrap_check_policy
def get_all(self, context):
- return self.network_rpcapi.get_all_networks(context)
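+        # Reads now go straight to the database rather than over rpc; an
+        # empty list is friendlier to callers than NoNetworksFound.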
+ try:
+ return self.db.network_get_all(context)
+ except exception.NoNetworksFound:
+ return []
@wrap_check_policy
def get(self, context, network_uuid):
- return self.network_rpcapi.get_network(context, network_uuid)
+ return self.db.network_get_by_uuid(context.elevated(), network_uuid)
@wrap_check_policy
def create(self, context, **kwargs):
@@ -126,36 +136,39 @@ class API(base.Base):
@wrap_check_policy
def disassociate(self, context, network_uuid):
- return self.network_rpcapi.disassociate_network(context, network_uuid)
+ network = self.get(context, network_uuid)
+ self.db.network_disassociate(context, network['id'])
@wrap_check_policy
def get_fixed_ip(self, context, id):
- return self.network_rpcapi.get_fixed_ip(context, id)
+ return self.db.fixed_ip_get(context, id)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
- return self.network_rpcapi.get_fixed_ip_by_address(context, address)
+ return self.db.fixed_ip_get_by_address(context, address)
@wrap_check_policy
def get_floating_ip(self, context, id):
- return self.network_rpcapi.get_floating_ip(context, id)
+ return self.db.floating_ip_get(context, id)
@wrap_check_policy
def get_floating_ip_pools(self, context):
- return self.network_rpcapi.get_floating_ip_pools(context)
+ return self.db.floating_ip_get_pools(context)
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
- return self.network_rpcapi.get_floating_ip_by_address(context, address)
+ return self.db.floating_ip_get_by_address(context, address)
@wrap_check_policy
def get_floating_ips_by_project(self, context):
- return self.network_rpcapi.get_floating_ips_by_project(context)
+ return self.db.floating_ip_get_all_by_project(context,
+ context.project_id)
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
- return self.network_rpcapi.get_floating_ips_by_fixed_address(context,
- fixed_address)
+ floating_ips = self.db.floating_ip_get_by_fixed_address(context,
+ fixed_address)
+ return [floating_ip['address'] for floating_ip in floating_ips]
@wrap_check_policy
def get_backdoor_port(self, context, host):
@@ -163,35 +176,34 @@ class API(base.Base):
@wrap_check_policy
def get_instance_id_by_floating_address(self, context, address):
- # NOTE(tr3buchet): i hate this
- return self.network_rpcapi.get_instance_id_by_floating_address(context,
- address)
+ fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address)
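+        # A floating ip with no fixed ip association maps to no instance.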
+ if fixed_ip is None:
+ return None
+ else:
+ return fixed_ip['instance_uuid']
@wrap_check_policy
def get_vifs_by_instance(self, context, instance):
- return self.network_rpcapi.get_vifs_by_instance(context,
- instance['id'])
+ return self.db.virtual_interface_get_by_instance(context,
+ instance['uuid'])
@wrap_check_policy
def get_vif_by_mac_address(self, context, mac_address):
- return self.network_rpcapi.get_vif_by_mac_address(context, mac_address)
+ return self.db.virtual_interface_get_by_address(context,
+ mac_address)
@wrap_check_policy
def allocate_floating_ip(self, context, pool=None):
"""Adds (allocates) a floating ip to a project from a pool."""
- # NOTE(vish): We don't know which network host should get the ip
- # when we allocate, so just send it to any one. This
- # will probably need to move into a network supervisor
- # at some point.
- return self.network_rpcapi.allocate_floating_ip(context,
- context.project_id, pool, False)
+ return self.floating_manager.allocate_floating_ip(context,
+ context.project_id, False, pool)
@wrap_check_policy
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes (deallocates) a floating ip with address from a project."""
- return self.network_rpcapi.deallocate_floating_ip(context, address,
- affect_auto_assigned)
+ return self.floating_manager.deallocate_floating_ip(context, address,
+ affect_auto_assigned)
@wrap_check_policy
@refresh_cache
@@ -200,10 +212,13 @@ class API(base.Base):
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
- ensures floating ip is allocated to the project in context
+ Ensures floating ip is allocated to the project in context.
+ Does not verify ownership of the fixed ip. Caller is assumed to have
+ checked that the instance is properly owned.
+
"""
- orig_instance_uuid = self.network_rpcapi.associate_floating_ip(context,
- floating_address, fixed_address, affect_auto_assigned)
+ orig_instance_uuid = self.floating_manager.associate_floating_ip(
+ context, floating_address, fixed_address, affect_auto_assigned)
if orig_instance_uuid:
msg_dict = dict(address=floating_address,
@@ -221,13 +236,14 @@ class API(base.Base):
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from fixed ip it is associated with."""
- self.network_rpcapi.disassociate_floating_ip(context, address,
+ return self.floating_manager.disassociate_floating_ip(context, address,
affect_auto_assigned)
@wrap_check_policy
@refresh_cache
def allocate_for_instance(self, context, instance, vpn,
- requested_networks, macs=None):
+ requested_networks, macs=None,
+ conductor_api=None):
"""Allocates all network structures for an instance.
TODO(someone): document the rest of these parameters.
@@ -235,9 +251,12 @@ class API(base.Base):
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
- NB: macs is ignored by nova-network.
:returns: network info as from get_instance_nw_info() below
"""
+ # NOTE(vish): We can't do the floating ip allocation here because
+ # this is called from compute.manager which shouldn't
+ # have db access so we do it on the other side of the
+ # rpc.
args = {}
args['vpn'] = vpn
args['requested_networks'] = requested_networks
@@ -246,6 +265,7 @@ class API(base.Base):
args['project_id'] = instance['project_id']
args['host'] = instance['host']
args['rxtx_factor'] = instance['instance_type']['rxtx_factor']
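+        # Forward the hypervisor-supplied MAC addresses to the network
+        # manager over rpc.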
+ args['macs'] = macs
nw_info = self.network_rpcapi.allocate_for_instance(context, **args)
return network_model.NetworkInfo.hydrate(nw_info)
@@ -253,7 +273,10 @@ class API(base.Base):
@wrap_check_policy
def deallocate_for_instance(self, context, instance):
"""Deallocates all network structures related to instance."""
-
+ # NOTE(vish): We can't do the floating ip deallocation here because
+ # this is called from compute.manager which shouldn't
+ # have db access so we do it on the other side of the
+ # rpc.
args = {}
args['instance_id'] = instance['id']
args['project_id'] = instance['project_id']
@@ -262,7 +285,8 @@ class API(base.Base):
@wrap_check_policy
@refresh_cache
- def add_fixed_ip_to_instance(self, context, instance, network_id):
+ def add_fixed_ip_to_instance(self, context, instance, network_id,
+ conductor_api=None):
"""Adds a fixed ip to instance from specified network."""
args = {'instance_id': instance['uuid'],
'host': instance['host'],
@@ -271,7 +295,8 @@ class API(base.Base):
@wrap_check_policy
@refresh_cache
- def remove_fixed_ip_from_instance(self, context, instance, address):
+ def remove_fixed_ip_from_instance(self, context, instance, address,
+ conductor_api=None):
"""Removes a fixed ip from instance from specified network."""
args = {'instance_id': instance['uuid'],
@@ -290,19 +315,29 @@ class API(base.Base):
project=_sentinel):
"""Associate or disassociate host or project to network."""
associations = {}
+ network_id = self.get(context, network_uuid)['id']
if host is not API._sentinel:
- associations['host'] = host
+ if host is None:
+ self.db.network_disassociate(context, network_id,
+ disassociate_host=True,
+ disassociate_project=False)
+ else:
+ self.db.network_set_host(context, network_id, host)
if project is not API._sentinel:
- associations['project'] = project
- self.network_rpcapi.associate(context, network_uuid, associations)
+ if project is None:
+ self.db.network_disassociate(context, network_id,
+ disassociate_host=False,
+ disassociate_project=True)
+ else:
+ self.db.network_associate(context, project, network_id, True)
@wrap_check_policy
- def get_instance_nw_info(self, context, instance, update_cache=True):
+ def get_instance_nw_info(self, context, instance, conductor_api=None):
"""Returns all network info related to an instance."""
result = self._get_instance_nw_info(context, instance)
- if update_cache:
- update_instance_cache_with_nw_info(self, context, instance,
- result)
+ update_instance_cache_with_nw_info(self, context, instance,
+ result, conductor_api)
return result
def _get_instance_nw_info(self, context, instance):
diff --git a/nova/network/api_deprecated.py b/nova/network/api_deprecated.py
new file mode 100644
index 000000000..b84a08a6d
--- /dev/null
+++ b/nova/network/api_deprecated.py
@@ -0,0 +1,465 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+This version of the api is deprecated in Grizzly and will be removed.
+
+It is provided just in case a third party manager is in use.
+"""
+
+import functools
+import inspect
+
+from nova.db import base
+from nova import exception
+from nova.network import model as network_model
+from nova.network import rpcapi as network_rpcapi
+from nova.openstack.common import log as logging
+from nova import policy
+
+LOG = logging.getLogger(__name__)
+
+
+def refresh_cache(f):
+ """
+ Decorator to update the instance_info_cache
+
+ Requires context and instance as function args
+ """
+ argspec = inspect.getargspec(f)
+
+ @functools.wraps(f)
+ def wrapper(self, context, *args, **kwargs):
+ res = f(self, context, *args, **kwargs)
+
+ try:
+ # get the instance from arguments (or raise ValueError)
+ instance = kwargs.get('instance')
+ if not instance:
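+                # args excludes 'self' and 'context', hence the offset of 2
+                # into argspec.args when locating 'instance'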
+ instance = args[argspec.args.index('instance') - 2]
+ except ValueError:
+ msg = _('instance is a required argument to use @refresh_cache')
+ raise Exception(msg)
+
+ update_instance_cache_with_nw_info(self, context, instance,
+ nw_info=res)
+
+ # return the original function's return value
+ return res
+ return wrapper
+
+
+def update_instance_cache_with_nw_info(api, context, instance,
+ nw_info=None):
+
+ try:
+ if not isinstance(nw_info, network_model.NetworkInfo):
+ nw_info = None
+ if not nw_info:
+ nw_info = api._get_instance_nw_info(context, instance)
+ # update cache
+ cache = {'network_info': nw_info.json()}
+ api.db.instance_info_cache_update(context, instance['uuid'], cache)
+ except Exception:
+ LOG.exception(_('Failed storing info cache'), instance=instance)
+
+
+def wrap_check_policy(func):
+ """Check policy corresponding to the wrapped methods prior to execution."""
+
+ @functools.wraps(func)
+ def wrapped(self, context, *args, **kwargs):
+ action = func.__name__
+ check_policy(context, action)
+ return func(self, context, *args, **kwargs)
+
+ return wrapped
+
+
+def check_policy(context, action):
+ target = {
+ 'project_id': context.project_id,
+ 'user_id': context.user_id,
+ }
+ _action = 'network:%s' % action
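+    # e.g. wrapping get_all() enforces the 'network:get_all' policy rule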
+ policy.enforce(context, _action, target)
+
+
+class API(base.Base):
+ """API for doing networking via the nova-network network manager.
+
+ This is a pluggable module - other implementations do networking via
+ other services (such as Quantum).
+ """
+
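+    # Sentinel marking "argument not passed", distinct from an explicit None.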
+ _sentinel = object()
+
+ def __init__(self, **kwargs):
+ self.network_rpcapi = network_rpcapi.NetworkAPI()
+ super(API, self).__init__(**kwargs)
+
+ @wrap_check_policy
+ def get_all(self, context):
+ return self.network_rpcapi.get_all_networks(context)
+
+ @wrap_check_policy
+ def get(self, context, network_uuid):
+ return self.network_rpcapi.get_network(context, network_uuid)
+
+ @wrap_check_policy
+ def create(self, context, **kwargs):
+ return self.network_rpcapi.create_networks(context, **kwargs)
+
+ @wrap_check_policy
+ def delete(self, context, network_uuid):
+ return self.network_rpcapi.delete_network(context, network_uuid, None)
+
+ @wrap_check_policy
+ def disassociate(self, context, network_uuid):
+ return self.network_rpcapi.disassociate_network(context, network_uuid)
+
+ @wrap_check_policy
+ def get_fixed_ip(self, context, id):
+ return self.network_rpcapi.get_fixed_ip(context, id)
+
+ @wrap_check_policy
+ def get_fixed_ip_by_address(self, context, address):
+ return self.network_rpcapi.get_fixed_ip_by_address(context, address)
+
+ @wrap_check_policy
+ def get_floating_ip(self, context, id):
+ return self.network_rpcapi.get_floating_ip(context, id)
+
+ @wrap_check_policy
+ def get_floating_ip_pools(self, context):
+ return self.network_rpcapi.get_floating_ip_pools(context)
+
+ @wrap_check_policy
+ def get_floating_ip_by_address(self, context, address):
+ return self.network_rpcapi.get_floating_ip_by_address(context, address)
+
+ @wrap_check_policy
+ def get_floating_ips_by_project(self, context):
+ return self.network_rpcapi.get_floating_ips_by_project(context)
+
+ @wrap_check_policy
+ def get_floating_ips_by_fixed_address(self, context, fixed_address):
+ return self.network_rpcapi.get_floating_ips_by_fixed_address(context,
+ fixed_address)
+
+ @wrap_check_policy
+ def get_backdoor_port(self, context, host):
+ return self.network_rpcapi.get_backdoor_port(context, host)
+
+ @wrap_check_policy
+ def get_instance_id_by_floating_address(self, context, address):
+ # NOTE(tr3buchet): i hate this
+ return self.network_rpcapi.get_instance_id_by_floating_address(context,
+ address)
+
+ @wrap_check_policy
+ def get_vifs_by_instance(self, context, instance):
+ return self.network_rpcapi.get_vifs_by_instance(context,
+ instance['id'])
+
+ @wrap_check_policy
+ def get_vif_by_mac_address(self, context, mac_address):
+ return self.network_rpcapi.get_vif_by_mac_address(context, mac_address)
+
+ @wrap_check_policy
+ def allocate_floating_ip(self, context, pool=None):
+ """Adds (allocates) a floating ip to a project from a pool."""
+ # NOTE(vish): We don't know which network host should get the ip
+ # when we allocate, so just send it to any one. This
+ # will probably need to move into a network supervisor
+ # at some point.
+ return self.network_rpcapi.allocate_floating_ip(context,
+ context.project_id, pool, False)
+
+ @wrap_check_policy
+ def release_floating_ip(self, context, address,
+ affect_auto_assigned=False):
+ """Removes (deallocates) a floating ip with address from a project."""
+ return self.network_rpcapi.deallocate_floating_ip(context, address,
+ affect_auto_assigned)
+
+ @wrap_check_policy
+ @refresh_cache
+ def associate_floating_ip(self, context, instance,
+ floating_address, fixed_address,
+ affect_auto_assigned=False):
+ """Associates a floating ip with a fixed ip.
+
+ ensures floating ip is allocated to the project in context
+ """
+ orig_instance_uuid = self.network_rpcapi.associate_floating_ip(context,
+ floating_address, fixed_address, affect_auto_assigned)
+
+ if orig_instance_uuid:
+ msg_dict = dict(address=floating_address,
+ instance_id=orig_instance_uuid)
+ LOG.info(_('re-assign floating IP %(address)s from '
+ 'instance %(instance_id)s') % msg_dict)
+ orig_instance = self.db.instance_get_by_uuid(context,
+ orig_instance_uuid)
+
+ # purge cached nw info for the original instance
+ update_instance_cache_with_nw_info(self, context, orig_instance)
+
+ @wrap_check_policy
+ @refresh_cache
+ def disassociate_floating_ip(self, context, instance, address,
+ affect_auto_assigned=False):
+ """Disassociates a floating ip from fixed ip it is associated with."""
+ self.network_rpcapi.disassociate_floating_ip(context, address,
+ affect_auto_assigned)
+
+ @wrap_check_policy
+ @refresh_cache
+ def allocate_for_instance(self, context, instance, vpn,
+ requested_networks, macs=None):
+ """Allocates all network structures for an instance.
+
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: macs is ignored by nova-network.
+ :returns: network info as from get_instance_nw_info() below
+ """
+ args = {}
+ args['vpn'] = vpn
+ args['requested_networks'] = requested_networks
+ args['instance_id'] = instance['id']
+ args['instance_uuid'] = instance['uuid']
+ args['project_id'] = instance['project_id']
+ args['host'] = instance['host']
+ args['rxtx_factor'] = instance['instance_type']['rxtx_factor']
+ nw_info = self.network_rpcapi.allocate_for_instance(context, **args)
+
+ return network_model.NetworkInfo.hydrate(nw_info)
+
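Both this method and _get_instance_nw_info() below finish with NetworkInfo.hydrate(): the RPC layer only moves JSON-serializable primitives, so the manager side returns plain dicts and the API side rebuilds the model object. A minimal sketch of that round-trip, with an illustrative Model class standing in for nova's real network_model:

    # Illustrative only; nova's real model lives in nova.network.model.
    class Model(list):
        @classmethod
        def hydrate(cls, primitives):
            # rebuild the rich object from RPC-safe primitives
            return cls(primitives or [])

        def dehydrate(self):
            # reduce to plain lists/dicts before crossing the RPC boundary
            return list(self)

    wire = Model([{'address': '10.0.0.2'}]).dehydrate()  # sent over RPC
    rebuilt = Model.hydrate(wire)                        # caller side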
+ @wrap_check_policy
+ def deallocate_for_instance(self, context, instance):
+ """Deallocates all network structures related to instance."""
+
+ args = {}
+ args['instance_id'] = instance['id']
+ args['project_id'] = instance['project_id']
+ args['host'] = instance['host']
+ self.network_rpcapi.deallocate_for_instance(context, **args)
+
+ @wrap_check_policy
+ @refresh_cache
+ def add_fixed_ip_to_instance(self, context, instance, network_id):
+ """Adds a fixed ip to instance from specified network."""
+ args = {'instance_id': instance['uuid'],
+ 'host': instance['host'],
+ 'network_id': network_id}
+ self.network_rpcapi.add_fixed_ip_to_instance(context, **args)
+
+ @wrap_check_policy
+ @refresh_cache
+ def remove_fixed_ip_from_instance(self, context, instance, address):
+ """Removes a fixed ip from instance from specified network."""
+
+ args = {'instance_id': instance['uuid'],
+ 'host': instance['host'],
+ 'address': address}
+ self.network_rpcapi.remove_fixed_ip_from_instance(context, **args)
+
+ @wrap_check_policy
+ def add_network_to_project(self, context, project_id, network_uuid=None):
+ """Force adds another network to a project."""
+ self.network_rpcapi.add_network_to_project(context, project_id,
+ network_uuid)
+
+ @wrap_check_policy
+ def associate(self, context, network_uuid, host=_sentinel,
+ project=_sentinel):
+ """Associate or disassociate host or project to network."""
+ associations = {}
+ if host is not API._sentinel:
+ associations['host'] = host
+ if project is not API._sentinel:
+ associations['project'] = project
+ self.network_rpcapi.associate(context, network_uuid, associations)
+
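The _sentinel default above lets associate() distinguish "argument omitted" from an explicit None (which callers may pass to clear an association). A standalone sketch of the idiom:

    _sentinel = object()  # unique marker meaning "argument not supplied"

    def associate(network_uuid, host=_sentinel, project=_sentinel):
        associations = {}
        if host is not _sentinel:       # passed explicitly, possibly None
            associations['host'] = host
        if project is not _sentinel:
            associations['project'] = project
        return associations

    assert associate('net') == {}                         # nothing passed
    assert associate('net', host=None) == {'host': None}  # explicit clear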
+ @wrap_check_policy
+ def get_instance_nw_info(self, context, instance, update_cache=True):
+ """Returns all network info related to an instance."""
+ result = self._get_instance_nw_info(context, instance)
+ if update_cache:
+ update_instance_cache_with_nw_info(self, context, instance,
+ result)
+ return result
+
+ def _get_instance_nw_info(self, context, instance):
+ """Returns all network info related to an instance."""
+ args = {'instance_id': instance['id'],
+ 'instance_uuid': instance['uuid'],
+ 'rxtx_factor': instance['instance_type']['rxtx_factor'],
+ 'host': instance['host'],
+ 'project_id': instance['project_id']}
+ nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)
+
+ return network_model.NetworkInfo.hydrate(nw_info)
+
+ @wrap_check_policy
+ def validate_networks(self, context, requested_networks):
+ """validate the networks passed at the time of creating
+ the server
+ """
+ return self.network_rpcapi.validate_networks(context,
+ requested_networks)
+
+ @wrap_check_policy
+ def get_instance_uuids_by_ip_filter(self, context, filters):
+ """Returns a list of dicts in the form of
+ {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
+ """
+ return self.network_rpcapi.get_instance_uuids_by_ip_filter(context,
+ filters)
+
+ @wrap_check_policy
+ def get_dns_domains(self, context):
+ """Returns a list of available dns domains.
+ These can be used to create DNS entries for floating ips.
+ """
+ return self.network_rpcapi.get_dns_domains(context)
+
+ @wrap_check_policy
+ def add_dns_entry(self, context, address, name, dns_type, domain):
+ """Create specified DNS entry for address."""
+ args = {'address': address,
+ 'name': name,
+ 'dns_type': dns_type,
+ 'domain': domain}
+ return self.network_rpcapi.add_dns_entry(context, **args)
+
+ @wrap_check_policy
+ def modify_dns_entry(self, context, name, address, domain):
+ """Create specified DNS entry for address."""
+ args = {'address': address,
+ 'name': name,
+ 'domain': domain}
+ return self.network_rpcapi.modify_dns_entry(context, **args)
+
+ @wrap_check_policy
+ def delete_dns_entry(self, context, name, domain):
+ """Delete the specified dns entry."""
+ args = {'name': name, 'domain': domain}
+ return self.network_rpcapi.delete_dns_entry(context, **args)
+
+ @wrap_check_policy
+ def delete_dns_domain(self, context, domain):
+ """Delete the specified dns domain."""
+ return self.network_rpcapi.delete_dns_domain(context, domain=domain)
+
+ @wrap_check_policy
+ def get_dns_entries_by_address(self, context, address, domain):
+ """Get entries for address and domain."""
+ args = {'address': address, 'domain': domain}
+ return self.network_rpcapi.get_dns_entries_by_address(context, **args)
+
+ @wrap_check_policy
+ def get_dns_entries_by_name(self, context, name, domain):
+ """Get entries for name and domain."""
+ args = {'name': name, 'domain': domain}
+ return self.network_rpcapi.get_dns_entries_by_name(context, **args)
+
+ @wrap_check_policy
+ def create_private_dns_domain(self, context, domain, availability_zone):
+ """Create a private DNS domain with nova availability zone."""
+ args = {'domain': domain, 'av_zone': availability_zone}
+ return self.network_rpcapi.create_private_dns_domain(context, **args)
+
+ @wrap_check_policy
+ def create_public_dns_domain(self, context, domain, project=None):
+ """Create a public DNS domain with optional nova project."""
+ args = {'domain': domain, 'project': project}
+ return self.network_rpcapi.create_public_dns_domain(context, **args)
+
+ @wrap_check_policy
+ def setup_networks_on_host(self, context, instance, host=None,
+ teardown=False):
+ """Setup or teardown the network structures on hosts related to
+ instance"""
+ host = host or instance['host']
+ # NOTE(tr3buchet): host is passed in cases where we need to setup
+ # or teardown the networks on a host which has been migrated to/from
+ # and instance['host'] is not yet or is no longer equal to
+ args = {'instance_id': instance['id'],
+ 'host': host,
+ 'teardown': teardown}
+
+ self.network_rpcapi.setup_networks_on_host(context, **args)
+
+ def _is_multi_host(self, context, instance):
+ try:
+ fixed_ips = self.db.fixed_ip_get_by_instance(context,
+ instance['uuid'])
+ except exception.FixedIpNotFoundForInstance:
+ return False
+ network = self.db.network_get(context, fixed_ips[0]['network_id'],
+ project_only='allow_none')
+ return network['multi_host']
+
+ def _get_floating_ip_addresses(self, context, instance):
+ floating_ips = self.db.instance_floating_address_get_all(context,
+ instance['uuid'])
+ return [floating_ip['address'] for floating_ip in floating_ips]
+
+ @wrap_check_policy
+ def migrate_instance_start(self, context, instance, migration):
+ """Start to migrate the network of an instance."""
+ args = dict(
+ instance_uuid=instance['uuid'],
+ rxtx_factor=instance['instance_type']['rxtx_factor'],
+ project_id=instance['project_id'],
+ source_compute=migration['source_compute'],
+ dest_compute=migration['dest_compute'],
+ floating_addresses=None,
+ )
+
+ if self._is_multi_host(context, instance):
+ args['floating_addresses'] = \
+ self._get_floating_ip_addresses(context, instance)
+ args['host'] = migration['source_compute']
+
+ self.network_rpcapi.migrate_instance_start(context, **args)
+
+ @wrap_check_policy
+ def migrate_instance_finish(self, context, instance, migration):
+ """Finish migrating the network of an instance."""
+ args = dict(
+ instance_uuid=instance['uuid'],
+ rxtx_factor=instance['instance_type']['rxtx_factor'],
+ project_id=instance['project_id'],
+ source_compute=migration['source_compute'],
+ dest_compute=migration['dest_compute'],
+ floating_addresses=None,
+ )
+
+ if self._is_multi_host(context, instance):
+ args['floating_addresses'] = \
+ self._get_floating_ip_addresses(context, instance)
+ args['host'] = migration['dest_compute']
+
+ self.network_rpcapi.migrate_instance_finish(context, **args)
diff --git a/nova/network/floating_ips.py b/nova/network/floating_ips.py
new file mode 100644
index 000000000..4b0a66492
--- /dev/null
+++ b/nova/network/floating_ips.py
@@ -0,0 +1,691 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova.db import base
+from nova import exception
+from nova.network import rpcapi as network_rpcapi
+from nova.openstack.common import cfg
+from nova.openstack.common import excutils
+from nova.openstack.common import importutils
+from nova.openstack.common import lockutils
+from nova.openstack.common import log as logging
+from nova.openstack.common.notifier import api as notifier
+from nova.openstack.common.rpc import common as rpc_common
+from nova import quota
+from nova import servicegroup
+
+LOG = logging.getLogger(__name__)
+
+QUOTAS = quota.QUOTAS
+
+floating_opts = [
+ cfg.StrOpt('default_floating_pool',
+ default='nova',
+ help='Default pool for floating ips'),
+ cfg.BoolOpt('auto_assign_floating_ip',
+ default=False,
+ help='Automatically assign a floating ip to each VM'),
+ cfg.StrOpt('floating_ip_dns_manager',
+ default='nova.network.noop_dns_driver.NoopDNSDriver',
+ help='full class name for the DNS Manager for floating IPs'),
+ cfg.StrOpt('instance_dns_manager',
+ default='nova.network.noop_dns_driver.NoopDNSDriver',
+ help='full class name for the DNS Manager for instance IPs'),
+ cfg.StrOpt('instance_dns_domain',
+ default='',
+ help='DNS domain used for instance IPs'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(floating_opts)
+CONF.import_opt('public_interface', 'nova.network.linux_net')
+CONF.import_opt('network_topic', 'nova.network.rpcapi')
+
+
+class FloatingIP(object):
+ """Mixin class for adding floating IP functionality to a manager."""
+
+ servicegroup_api = None
+
+ def init_host_floating_ips(self):
+ """Configures floating ips owned by host."""
+
+ admin_context = context.get_admin_context()
+ try:
+ floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
+ self.host)
+ except exception.NotFound:
+ return
+
+ for floating_ip in floating_ips:
+ fixed_ip_id = floating_ip.get('fixed_ip_id')
+ if fixed_ip_id:
+ try:
+ fixed_ip = self.db.fixed_ip_get(admin_context,
+ fixed_ip_id,
+ get_network=True)
+ except exception.FixedIpNotFound:
+ msg = _('Fixed ip %(fixed_ip_id)s not found') % locals()
+ LOG.debug(msg)
+ continue
+ interface = CONF.public_interface or floating_ip['interface']
+ try:
+ self.l3driver.add_floating_ip(floating_ip['address'],
+ fixed_ip['address'],
+ interface,
+ fixed_ip['network'])
+ except exception.ProcessExecutionError:
+ LOG.debug(_('Interface %(interface)s not found'), locals())
+ raise exception.NoFloatingIpInterface(interface=interface)
+
+ def allocate_for_instance(self, context, **kwargs):
+ """Handles allocating the floating IP resources for an instance.
+
+ calls super class allocate_for_instance() as well
+
+ rpc.called by network_api
+ """
+ instance_id = kwargs.get('instance_id')
+ instance_uuid = kwargs.get('instance_uuid')
+ project_id = kwargs.get('project_id')
+ requested_networks = kwargs.get('requested_networks')
+ # call the next inherited class's allocate_for_instance()
+ # which is currently the NetworkManager version
+ # do this first so fixed ip is already allocated
+ nw_info = super(FloatingIP, self).allocate_for_instance(context,
+ **kwargs)
+ if CONF.auto_assign_floating_ip:
+ # allocate a floating ip
+ floating_address = self.allocate_floating_ip(context, project_id,
+ True)
+ LOG.debug(_("floating IP allocation for instance "
+ "|%(floating_address)s|") % locals(),
+ instance_uuid=instance_uuid, context=context)
+ # set auto_assigned column to true for the floating ip
+ self.db.floating_ip_set_auto_assigned(context, floating_address)
+
+ # get the first fixed address belonging to the instance
+ fixed_ips = nw_info.fixed_ips()
+ fixed_address = fixed_ips[0]['address']
+
+ # associate the floating ip to fixed_ip
+ self.associate_floating_ip(context,
+ floating_address,
+ fixed_address,
+ affect_auto_assigned=True)
+
+ # create a fresh set of network info that contains the floating ip
+ nw_info = self.get_instance_nw_info(context, **kwargs)
+
+ return nw_info
+
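As the comments above note, the mixin delegates via super() to whatever manager class follows it in the MRO (currently the NetworkManager version). A minimal sketch of that cooperative-inheritance shape, with invented class names:

    class BaseManager(object):
        def allocate_for_instance(self, **kwargs):
            return ['fixed-ip']        # stands in for NetworkManager's work

    class FloatingIPMixin(object):
        def allocate_for_instance(self, **kwargs):
            nw_info = super(FloatingIPMixin,
                            self).allocate_for_instance(**kwargs)
            nw_info.append('floating-ip')   # mixin's work happens after
            return nw_info

    class Manager(FloatingIPMixin, BaseManager):
        pass

    assert Manager().allocate_for_instance() == ['fixed-ip', 'floating-ip']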
+ def deallocate_for_instance(self, context, **kwargs):
+ """Handles deallocating floating IP resources for an instance.
+
+ calls super class deallocate_for_instance() as well.
+
+ rpc.called by network_api
+ """
+ instance_id = kwargs.get('instance_id')
+
+ # NOTE(francois.charlier): in some cases the instance might be
+ # deleted before the IPs are released, so we need to get deleted
+ # instances too
+ instance = self.db.instance_get(
+ context.elevated(read_deleted='yes'), instance_id)
+
+ try:
+ fixed_ips = self.db.fixed_ip_get_by_instance(context,
+ instance['uuid'])
+ except exception.FixedIpNotFoundForInstance:
+ fixed_ips = []
+ # add to kwargs so we can pass to super to save a db lookup there
+ kwargs['fixed_ips'] = fixed_ips
+ for fixed_ip in fixed_ips:
+ fixed_id = fixed_ip['id']
+ floating_ips = self.db.floating_ip_get_by_fixed_ip_id(context,
+ fixed_id)
+ # disassociate floating ips related to fixed_ip
+ for floating_ip in floating_ips:
+ address = floating_ip['address']
+ try:
+ self.disassociate_floating_ip(context,
+ address,
+ affect_auto_assigned=True)
+ except exception.FloatingIpNotAssociated:
+ LOG.exception(_("Floating IP is not associated. Ignore."))
+ # deallocate if auto_assigned
+ if floating_ip['auto_assigned']:
+ self.deallocate_floating_ip(context, address,
+ affect_auto_assigned=True)
+
+ # call the next inherited class's deallocate_for_instance()
+ # which is currently the NetworkManager version
+ # call this after so floating IPs are handled first
+ super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
+
+ def _floating_ip_owned_by_project(self, context, floating_ip):
+ """Raises if floating ip does not belong to project."""
+ if context.is_admin:
+ return
+
+ if floating_ip['project_id'] != context.project_id:
+ if floating_ip['project_id'] is None:
+ LOG.warn(_('Address |%(address)s| is not allocated'),
+ {'address': floating_ip['address']})
+ raise exception.NotAuthorized()
+ else:
+ LOG.warn(_('Address |%(address)s| is not allocated to your '
+ 'project |%(project)s|'),
+ {'address': floating_ip['address'],
+ 'project': context.project_id})
+ raise exception.NotAuthorized()
+
+ def allocate_floating_ip(self, context, project_id, auto_assigned=False,
+ pool=None):
+ """Gets a floating ip from the pool."""
+ # NOTE(tr3buchet): all network hosts in zone now use the same pool
+ pool = pool or CONF.default_floating_pool
+ use_quota = not auto_assigned
+
+ # Check the quota; can't put this in the API because we get
+ # called into from other places
+ try:
+ if use_quota:
+ reservations = QUOTAS.reserve(context, floating_ips=1)
+ except exception.OverQuota:
+ pid = context.project_id
+ LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate "
+ "floating IP") % locals())
+ raise exception.FloatingIpLimitExceeded()
+
+ try:
+ floating_ip = self.db.floating_ip_allocate_address(context,
+ project_id,
+ pool)
+ payload = dict(project_id=project_id, floating_ip=floating_ip)
+ notifier.notify(context,
+ notifier.publisher_id("network"),
+ 'network.floating_ip.allocate',
+ notifier.INFO, payload)
+
+ # Commit the reservations
+ if use_quota:
+ QUOTAS.commit(context, reservations)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ if use_quota:
+ QUOTAS.rollback(context, reservations)
+
+ return floating_ip
+
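The quota handling above is a two-phase pattern: reserve before doing the work, commit on success, roll back on any failure so usage counters cannot drift. Schematically, with a stand-in quotas object rather than nova's real quota engine:

    def allocate_with_quota(quotas, do_allocate):
        reservations = quotas.reserve(floating_ips=1)  # may raise OverQuota
        try:
            result = do_allocate()
            quotas.commit(reservations)       # success: make it permanent
        except Exception:
            quotas.rollback(reservations)     # failure: release the hold
            raise
        return result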
+ @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
+ def deallocate_floating_ip(self, context, address,
+ affect_auto_assigned=False):
+ """Returns a floating ip to the pool."""
+ floating_ip = self.db.floating_ip_get_by_address(context, address)
+
+ # handle auto_assigned
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ return
+ use_quota = not floating_ip.get('auto_assigned')
+
+ # make sure project owns this floating ip (allocated)
+ self._floating_ip_owned_by_project(context, floating_ip)
+
+ # make sure floating ip is not associated
+ if floating_ip['fixed_ip_id']:
+ floating_address = floating_ip['address']
+ raise exception.FloatingIpAssociated(address=floating_address)
+
+ # clean up any associated DNS entries
+ self._delete_all_entries_for_ip(context,
+ floating_ip['address'])
+ payload = dict(project_id=floating_ip['project_id'],
+ floating_ip=floating_ip['address'])
+ notifier.notify(context,
+ notifier.publisher_id("network"),
+ 'network.floating_ip.deallocate',
+ notifier.INFO, payload=payload)
+
+ # Get reservations...
+ try:
+ if use_quota:
+ reservations = QUOTAS.reserve(context, floating_ips=-1)
+ else:
+ reservations = None
+ except Exception:
+ reservations = None
+ LOG.exception(_("Failed to update usages deallocating "
+ "floating IP"))
+
+ self.db.floating_ip_deallocate(context, address)
+
+ # Commit the reservations
+ if reservations:
+ QUOTAS.commit(context, reservations)
+
+ @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
+ def associate_floating_ip(self, context, floating_address, fixed_address,
+ affect_auto_assigned=False):
+ """Associates a floating ip with a fixed ip.
+
+ Makes sure everything makes sense then calls _associate_floating_ip,
+ rpc'ing to correct host if i'm not it.
+
+ Access to the floating_address is verified but access to the
+ fixed_address is not verified. This assumes that the calling
+ side has already verified that the fixed_address is legal by
+ checking access to the instance.
+ """
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ floating_address)
+ # handle auto_assigned
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ return
+
+ # make sure project owns this floating ip (allocated)
+ self._floating_ip_owned_by_project(context, floating_ip)
+
+ # disassociate any already associated
+ orig_instance_uuid = None
+ if floating_ip['fixed_ip_id']:
+ # find previously associated instance
+ fixed_ip = self.db.fixed_ip_get(context,
+ floating_ip['fixed_ip_id'])
+ if fixed_ip['address'] == fixed_address:
+ # NOTE(vish): already associated to this address
+ return
+ orig_instance_uuid = fixed_ip['instance_uuid']
+
+ self.disassociate_floating_ip(context, floating_address)
+
+ fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_address)
+
+ # send to correct host, unless i'm the correct host
+ network = self.db.network_get(context.elevated(),
+ fixed_ip['network_id'])
+ if network['multi_host']:
+ instance = self.db.instance_get_by_uuid(context,
+ fixed_ip['instance_uuid'])
+ host = instance['host']
+ else:
+ host = network['host']
+
+ interface = floating_ip.get('interface')
+ if host == self.host:
+ # i'm the correct host
+ self._associate_floating_ip(context, floating_address,
+ fixed_address, interface,
+ fixed_ip['instance_uuid'])
+ else:
+ # send to correct host
+ self.network_rpcapi._associate_floating_ip(context,
+ floating_address, fixed_address, interface, host,
+ fixed_ip['instance_uuid'])
+
+ return orig_instance_uuid
+
+ def _associate_floating_ip(self, context, floating_address, fixed_address,
+ interface, instance_uuid):
+ """Performs db and driver calls to associate floating ip & fixed ip."""
+ interface = CONF.public_interface or interface
+
+ @lockutils.synchronized(unicode(floating_address), 'nova-')
+ def do_associate():
+ # associate floating ip
+ fixed = self.db.floating_ip_fixed_ip_associate(context,
+ floating_address,
+ fixed_address,
+ self.host)
+ if not fixed:
+ # NOTE(vish): ip was already associated
+ return
+ try:
+ # gogo driver time
+ self.l3driver.add_floating_ip(floating_address, fixed_address,
+ interface, fixed['network'])
+ except exception.ProcessExecutionError as e:
+ self.db.floating_ip_disassociate(context, floating_address)
+ if "Cannot find device" in str(e):
+ LOG.error(_('Interface %(interface)s not found'), locals())
+ raise exception.NoFloatingIpInterface(interface=interface)
+
+ payload = dict(project_id=context.project_id,
+ instance_id=instance_uuid,
+ floating_ip=floating_address)
+ notifier.notify(context,
+ notifier.publisher_id("network"),
+ 'network.floating_ip.associate',
+ notifier.INFO, payload=payload)
+ do_associate()
+
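Defining do_associate() inline lets lockutils.synchronized use the floating address itself as the lock name, so work is serialized per address rather than behind one global lock. A rough equivalent with a plain threading lock registry:

    import threading

    _locks = {}
    _registry_lock = threading.Lock()

    def _lock_for(name):
        with _registry_lock:                # protect the registry itself
            return _locks.setdefault(name, threading.Lock())

    def associate(floating_address, fixed_address):
        with _lock_for(floating_address):   # per-address critical section
            pass  # db associate + l3 driver call would go here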
+ @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
+ def disassociate_floating_ip(self, context, address,
+ affect_auto_assigned=False):
+ """Disassociates a floating ip from its fixed ip.
+
+ Makes sure everything makes sense then calls _disassociate_floating_ip,
+ rpc'ing to correct host if i'm not it.
+ """
+ floating_ip = self.db.floating_ip_get_by_address(context, address)
+
+ # handle auto assigned
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ raise exception.CannotDisassociateAutoAssignedFloatingIP()
+
+ # make sure project owns this floating ip (allocated)
+ self._floating_ip_owned_by_project(context, floating_ip)
+
+ # make sure floating ip is associated
+ if not floating_ip.get('fixed_ip_id'):
+ floating_address = floating_ip['address']
+ raise exception.FloatingIpNotAssociated(address=floating_address)
+
+ fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id'])
+
+ # send to correct host, unless i'm the correct host
+ network = self.db.network_get(context.elevated(),
+ fixed_ip['network_id'])
+ interface = floating_ip.get('interface')
+ if network['multi_host']:
+ instance = self.db.instance_get_by_uuid(context,
+ fixed_ip['instance_uuid'])
+ service = self.db.service_get_by_host_and_topic(
+ context.elevated(), instance['host'], CONF.network_topic)
+ if service and self.servicegroup_api.service_is_up(service):
+ host = instance['host']
+ else:
+ # NOTE(vish): if the service is down just deallocate the data
+ # locally. Set the host to local so the call will
+ # not go over rpc and set interface to None so the
+ # teardown in the driver does not happen.
+ host = self.host
+ interface = None
+ else:
+ host = network['host']
+
+ if host == self.host:
+ # i'm the correct host
+ self._disassociate_floating_ip(context, address, interface,
+ fixed_ip['instance_uuid'])
+ else:
+ # send to correct host
+ self.network_rpcapi._disassociate_floating_ip(context, address,
+ interface, host, fixed_ip['instance_uuid'])
+
+ def _disassociate_floating_ip(self, context, address, interface,
+ instance_uuid):
+ """Performs db and driver calls to disassociate floating ip."""
+ interface = CONF.public_interface or interface
+
+ @lockutils.synchronized(unicode(address), 'nova-')
+ def do_disassociate():
+ # NOTE(vish): Note that we are disassociating in the db before we
+ # actually remove the ip address on the host. We are
+ # safe from races on this host due to the decorator,
+ # but another host might grab the ip right away. We
+ # don't worry about this case because the minuscule
+ # window where the ip is on both hosts shouldn't cause
+ # any problems.
+ fixed = self.db.floating_ip_disassociate(context, address)
+
+ if not fixed:
+ # NOTE(vish): ip was already disassociated
+ return
+ if interface:
+ # go go driver time
+ self.l3driver.remove_floating_ip(address, fixed['address'],
+ interface, fixed['network'])
+ payload = dict(project_id=context.project_id,
+ instance_id=instance_uuid,
+ floating_ip=address)
+ notifier.notify(context,
+ notifier.publisher_id("network"),
+ 'network.floating_ip.disassociate',
+ notifier.INFO, payload=payload)
+ do_disassociate()
+
+ @rpc_common.client_exceptions(exception.FloatingIpNotFound)
+ def get_floating_ip(self, context, id):
+ """Returns a floating IP as a dict."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi.
+ return dict(self.db.floating_ip_get(context, id).iteritems())
+
+ def get_floating_pools(self, context):
+ """Returns list of floating pools."""
+ # NOTE(maurosr) This method should be removed in future, replaced by
+ # get_floating_ip_pools. See bug #1091668
+ return self.get_floating_ip_pools(context)
+
+ def get_floating_ip_pools(self, context):
+ """Returns list of floating ip pools."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi.
+ pools = self.db.floating_ip_get_pools(context)
+ return [dict(pool.iteritems()) for pool in pools]
+
+ def get_floating_ip_by_address(self, context, address):
+ """Returns a floating IP as a dict."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi.
+ return dict(self.db.floating_ip_get_by_address(context,
+ address).iteritems())
+
+ def get_floating_ips_by_project(self, context):
+ """Returns the floating IPs allocated to a project."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi.
+ ips = self.db.floating_ip_get_all_by_project(context,
+ context.project_id)
+ return [dict(ip.iteritems()) for ip in ips]
+
+ def get_floating_ips_by_fixed_address(self, context, fixed_address):
+ """Returns the floating IPs associated with a fixed_address."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi.
+ floating_ips = self.db.floating_ip_get_by_fixed_address(context,
+ fixed_address)
+ return [floating_ip['address'] for floating_ip in floating_ips]
+
+ def _is_stale_floating_ip_address(self, context, floating_ip):
+ try:
+ self._floating_ip_owned_by_project(context, floating_ip)
+ except exception.NotAuthorized:
+ return True
+ return not floating_ip.get('fixed_ip_id')
+
+ def migrate_instance_start(self, context, instance_uuid,
+ floating_addresses,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
+ # We only care if floating_addresses are provided and we're
+ # switching hosts
+ if not floating_addresses or (source and source == dest):
+ return
+
+ LOG.info(_("Starting migration network for instance"
+ " %(instance_uuid)s"), locals())
+ for address in floating_addresses:
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ address)
+
+ if self._is_stale_floating_ip_address(context, floating_ip):
+ LOG.warn(_("Floating ip address |%(address)s| no longer "
+ "belongs to instance %(instance_uuid)s. Will not"
+ "migrate it "), locals())
+ continue
+
+ interface = CONF.public_interface or floating_ip['interface']
+ fixed_ip = self.db.fixed_ip_get(context,
+ floating_ip['fixed_ip_id'],
+ get_network=True)
+ self.l3driver.remove_floating_ip(floating_ip['address'],
+ fixed_ip['address'],
+ interface,
+ fixed_ip['network'])
+
+ # NOTE(wenjianhn): Make sure this address will not be bound to the
+ # public interface when nova-network restarts on the dest compute node
+ self.db.floating_ip_update(context,
+ floating_ip['address'],
+ {'host': None})
+
+ def migrate_instance_finish(self, context, instance_uuid,
+ floating_addresses, host=None,
+ rxtx_factor=None, project_id=None,
+ source=None, dest=None):
+ # We only care if floating_addresses are provided and we're
+ # switching hosts
+ if host and not dest:
+ dest = host
+ if not floating_addresses or (source and source == dest):
+ return
+
+ LOG.info(_("Finishing migration network for instance"
+ " %(instance_uuid)s"), locals())
+
+ for address in floating_addresses:
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ address)
+
+ if self._is_stale_floating_ip_address(context, floating_ip):
+ LOG.warn(_("Floating ip address |%(address)s| no longer "
+ "belongs to instance %(instance_uuid)s. Will not"
+ "setup it."), locals())
+ continue
+
+ self.db.floating_ip_update(context,
+ floating_ip['address'],
+ {'host': dest})
+
+ interface = CONF.public_interface or floating_ip['interface']
+ fixed_ip = self.db.fixed_ip_get(context,
+ floating_ip['fixed_ip_id'],
+ get_network=True)
+ self.l3driver.add_floating_ip(floating_ip['address'],
+ fixed_ip['address'],
+ interface,
+ fixed_ip['network'])
+
+ def _prepare_domain_entry(self, context, domain):
+ domainref = self.db.dnsdomain_get(context, domain)
+ scope = domainref['scope']
+ if scope == 'private':
+ av_zone = domainref['availability_zone']
+ this_domain = {'domain': domain,
+ 'scope': scope,
+ 'availability_zone': av_zone}
+ else:
+ project = domainref['project_id']
+ this_domain = {'domain': domain,
+ 'scope': scope,
+ 'project': project}
+ return this_domain
+
+ def get_dns_domains(self, context):
+ domains = []
+
+ db_domain_list = self.db.dnsdomain_list(context)
+ floating_driver_domain_list = self.floating_dns_manager.get_domains()
+ instance_driver_domain_list = self.instance_dns_manager.get_domains()
+
+ for db_domain in db_domain_list:
+ if (db_domain in floating_driver_domain_list or
+ db_domain in instance_driver_domain_list):
+ domain_entry = self._prepare_domain_entry(context,
+ db_domain)
+ if domain_entry:
+ domains.append(domain_entry)
+ else:
+ LOG.warn(_('Database inconsistency: DNS domain |%s| is '
+ 'registered in the Nova db but not visible to '
+ 'either the floating or instance DNS driver. It '
+ 'will be ignored.'), db_domain)
+
+ return domains
+
+ def add_dns_entry(self, context, address, name, dns_type, domain):
+ self.floating_dns_manager.create_entry(name, address,
+ dns_type, domain)
+
+ def modify_dns_entry(self, context, address, name, domain):
+ self.floating_dns_manager.modify_address(name, address,
+ domain)
+
+ def delete_dns_entry(self, context, name, domain):
+ self.floating_dns_manager.delete_entry(name, domain)
+
+ def _delete_all_entries_for_ip(self, context, address):
+ domain_list = self.get_dns_domains(context)
+ for domain in domain_list:
+ names = self.get_dns_entries_by_address(context,
+ address,
+ domain['domain'])
+ for name in names:
+ self.delete_dns_entry(context, name, domain['domain'])
+
+ def get_dns_entries_by_address(self, context, address, domain):
+ return self.floating_dns_manager.get_entries_by_address(address,
+ domain)
+
+ def get_dns_entries_by_name(self, context, name, domain):
+ return self.floating_dns_manager.get_entries_by_name(name,
+ domain)
+
+ def create_private_dns_domain(self, context, domain, av_zone):
+ self.db.dnsdomain_register_for_zone(context, domain, av_zone)
+ try:
+ self.instance_dns_manager.create_domain(domain)
+ except exception.FloatingIpDNSExists:
+ LOG.warn(_('Domain |%(domain)s| already exists, '
+ 'changing zone to |%(av_zone)s|.'),
+ {'domain': domain, 'av_zone': av_zone})
+
+ def create_public_dns_domain(self, context, domain, project):
+ self.db.dnsdomain_register_for_project(context, domain, project)
+ try:
+ self.floating_dns_manager.create_domain(domain)
+ except exception.FloatingIpDNSExists:
+ LOG.warn(_('Domain |%(domain)s| already exists, '
+ 'changing project to |%(project)s|.'),
+ {'domain': domain, 'project': project})
+
+ def delete_dns_domain(self, context, domain):
+ self.db.dnsdomain_unregister(context, domain)
+ self.floating_dns_manager.delete_domain(domain)
+
+ def _get_project_for_domain(self, context, domain):
+ return self.db.dnsdomain_project(context, domain)
+
+
+class LocalManager(base.Base, FloatingIP):
+ def __init__(self):
+ super(LocalManager, self).__init__()
+ # NOTE(vish): setting the host to none ensures that the actual
+ # l3driver commands for l3 are done via rpc.
+ self.host = None
+ self.servicegroup_api = servicegroup.API()
+ self.network_rpcapi = network_rpcapi.NetworkAPI()
+ self.floating_dns_manager = importutils.import_object(
+ CONF.floating_ip_dns_manager)
+ self.instance_dns_manager = importutils.import_object(
+ CONF.instance_dns_manager)
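Because the FloatingIP methods above compare host == self.host to decide between acting locally and RPC'ing, setting self.host = None means the comparison never matches a real network host, so every l3 operation is forwarded over RPC. Schematically (names illustrative):

    def dispatch(self_host, target_host, do_local, do_rpc):
        # LocalManager sets its host to None, so target_host == self_host
        # is never true for a real host and the RPC branch always runs.
        if target_host == self_host:
            return do_local()
        return do_rpc(target_host)

    dispatch(None, 'net-host-1',
             do_local=lambda: 'local',
             do_rpc=lambda h: 'rpc to %s' % h)   # -> 'rpc to net-host-1'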
diff --git a/nova/network/l3.py b/nova/network/l3.py
index baf77c112..9ca6b6a43 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -48,13 +48,16 @@ class L3Driver(object):
""":returns: True/False (whether the driver is initialized)."""
raise NotImplementedError()
- def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
+ def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id,
+ network=None):
"""Add a floating IP bound to the fixed IP with an optional
l3_interface_id. Some drivers won't care about the
- l3_interface_id so just pass None in that case"""
+ l3_interface_id so just pass None in that case. Network
+ is also an optional parameter."""
raise NotImplementedError()
- def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
+ def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id,
+ network=None):
raise NotImplementedError()
def add_vpn(self, public_ip, port, private_ip):
@@ -96,15 +99,17 @@ class LinuxNetL3(L3Driver):
def remove_gateway(self, network_ref):
linux_net.unplug(network_ref)
- def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
- linux_net.bind_floating_ip(floating_ip, l3_interface_id)
+ def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id,
+ network=None):
linux_net.ensure_floating_forward(floating_ip, fixed_ip,
- l3_interface_id)
+ l3_interface_id, network)
+ linux_net.bind_floating_ip(floating_ip, l3_interface_id)
- def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
+ def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id,
+ network=None):
linux_net.unbind_floating_ip(floating_ip, l3_interface_id)
linux_net.remove_floating_forward(floating_ip, fixed_ip,
- l3_interface_id)
+ l3_interface_id, network)
def add_vpn(self, public_ip, port, private_ip):
linux_net.ensure_vpn_forward(public_ip, port, private_ip)
@@ -120,8 +125,8 @@ class LinuxNetL3(L3Driver):
class NullL3(L3Driver):
"""The L3 driver that doesn't do anything. This class can be used when
- nova-network shuld not manipulate L3 forwarding at all (e.g., in a Flat
- or FlatDHCP scenario"""
+ nova-network should not manipulate L3 forwarding at all (e.g., in a Flat
+ or FlatDHCP scenario)."""
def __init__(self):
pass
@@ -140,10 +145,12 @@ class NullL3(L3Driver):
def remove_gateway(self, network_ref):
pass
- def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
+ def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id,
+ network=None):
pass
- def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
+ def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id,
+ network=None):
pass
def add_vpn(self, public_ip, port, private_ip):
diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py
index c5ae62899..680b2f435 100644
--- a/nova/network/ldapdns.py
+++ b/nova/network/ldapdns.py
@@ -33,7 +33,8 @@ ldap_dns_opts = [
help='user for ldap DNS'),
cfg.StrOpt('ldap_dns_password',
default='password',
- help='password for ldap DNS'),
+ help='password for ldap DNS',
+ secret=True),
cfg.StrOpt('ldap_dns_soa_hostmaster',
default='hostmaster@example.org',
help='Hostmaster for ldap dns driver Start of Authority'),
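Assuming the oslo-style cfg module in this tree behaves like upstream, secret=True only affects how the option is displayed: code still reads the real value, but option dumps mask it. A small sketch of the declaration:

    from nova.openstack.common import cfg

    opts = [cfg.StrOpt('ldap_dns_password', default='password',
                       help='password for ldap DNS', secret=True)]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts)
    # conf.ldap_dns_password returns the real value to code, while
    # logged option dumps print secret options masked (e.g. '****').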
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index a9b44e94a..08a2ae354 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -31,6 +31,7 @@ from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
from nova import paths
from nova import utils
@@ -85,6 +86,11 @@ linux_net_opts = [
default=False,
help='Use single default gateway. Only first nic of vm will '
'get default gateway from dhcp server'),
+ cfg.MultiStrOpt('forward_bridge_interface',
+ default=['all'],
+ help='An interface that bridges can forward to. If this '
+ 'is set to all then all traffic will be forwarded. '
+ 'Can be specified multiple times.'),
cfg.StrOpt('metadata_host',
default='$my_ip',
help='the ip for the metadata api server'),
@@ -378,7 +384,7 @@ class IptablesManager(object):
for table in tables:
start, end = self._find_table(all_lines, table)
all_lines[start:end] = self._modify_rules(
- all_lines[start:end], tables[table])
+ all_lines[start:end], tables[table], table_name=table)
self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
process_input='\n'.join(all_lines),
attempts=5)
@@ -392,18 +398,24 @@ class IptablesManager(object):
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
- # For Unit Tests
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
- def _modify_rules(self, current_lines, table, binary=None):
+ def _modify_rules(self, current_lines, table, binary=None,
+ table_name=None):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
+ if not current_lines:
+ fake_table = ['#Generated by nova',
+ '*' + table_name, 'COMMIT',
+ '#Completed by nova']
+ current_lines = fake_table
+
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
current_lines)
@@ -418,6 +430,9 @@ class IptablesManager(object):
if not rule.startswith(':'):
break
+ if not seen_chains:
+ rules_index = 2
+
our_rules = []
bot_rules = []
for rule in rules:
@@ -645,18 +660,29 @@ def ensure_vpn_forward(public_ip, port, private_ip):
iptables_manager.apply()
-def ensure_floating_forward(floating_ip, fixed_ip, device):
+def ensure_floating_forward(floating_ip, fixed_ip, device, network):
"""Ensure floating ip forwarding rule."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
+ if device != network['bridge']:
+ ensure_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
-def remove_floating_forward(floating_ip, fixed_ip, device):
+def remove_floating_forward(floating_ip, fixed_ip, device, network):
"""Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
+ if device != network['bridge']:
+ remove_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
+
+
+def floating_ebtables_rules(fixed_ip, network):
+ """Makes sure only in-network traffic is bridged."""
+ return (['PREROUTING --logical-in %s -p ipv4 --ip-src %s '
+ '! --ip-dst %s -j redirect --redirect-target ACCEPT' %
+ (network['bridge'], fixed_ip, network['cidr'])], 'nat')
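For a fixed ip behind a bridge, the helper above renders one nat-table PREROUTING rule that only redirects traffic whose destination falls outside the network's own CIDR. Plugging sample (illustrative) values into the format string:

    network = {'bridge': 'br100', 'cidr': '10.0.0.0/24'}
    fixed_ip = '10.0.0.2'
    rule = ('PREROUTING --logical-in %s -p ipv4 --ip-src %s '
            '! --ip-dst %s -j redirect --redirect-target ACCEPT' %
            (network['bridge'], fixed_ip, network['cidr']))
    # -> 'PREROUTING --logical-in br100 -p ipv4 --ip-src 10.0.0.2
    #     ! --ip-dst 10.0.0.0/24 -j redirect --redirect-target ACCEPT'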
def floating_forward_rules(floating_ip, fixed_ip, device):
@@ -735,7 +761,11 @@ def get_dhcp_leases(context, network_ref):
for data in db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host):
- hosts.append(_host_lease(data))
+ # NOTE(cfb): Don't return a lease entry if the IP isn't
+ # already leased
+ if data['allocated'] and data['leased']:
+ hosts.append(_host_lease(data))
+
return '\n'.join(hosts)
@@ -988,13 +1018,8 @@ interface %s
def _host_lease(data):
"""Return a host string for an address in leasefile format."""
- if data['instance_updated']:
- timestamp = data['instance_updated']
- else:
- timestamp = data['instance_created']
-
+ timestamp = timeutils.utcnow()
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
-
return '%d %s %s %s *' % (seconds_since_epoch + CONF.dhcp_lease_time,
data['vif_address'],
data['address'],
@@ -1381,10 +1406,8 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
# Don't forward traffic unless we were told to be a gateway
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
- ipv4_filter.add_rule('FORWARD',
- '--in-interface %s -j ACCEPT' % bridge)
- ipv4_filter.add_rule('FORWARD',
- '--out-interface %s -j ACCEPT' % bridge)
+ for rule in get_gateway_rules(bridge):
+ ipv4_filter.add_rule(*rule)
else:
ipv4_filter.add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
@@ -1401,10 +1424,8 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
if filtering:
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
- ipv4_filter.remove_rule('FORWARD',
- '--in-interface %s -j ACCEPT' % bridge)
- ipv4_filter.remove_rule('FORWARD',
- '--out-interface %s -j ACCEPT' % bridge)
+ for rule in get_gateway_rules(bridge):
+ ipv4_filter.remove_rule(*rule)
else:
ipv4_filter.remove_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
@@ -1421,18 +1442,18 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
@lockutils.synchronized('ebtables', 'nova-', external=True)
-def ensure_ebtables_rules(rules):
+def ensure_ebtables_rules(rules, table='filter'):
for rule in rules:
- cmd = ['ebtables', '-D'] + rule.split()
+ cmd = ['ebtables', '-t', table, '-D'] + rule.split()
_execute(*cmd, check_exit_code=False, run_as_root=True)
- cmd[1] = '-I'
+ cmd[3] = '-I'
_execute(*cmd, run_as_root=True)
@lockutils.synchronized('ebtables', 'nova-', external=True)
-def remove_ebtables_rules(rules):
+def remove_ebtables_rules(rules, table='filter'):
for rule in rules:
- cmd = ['ebtables', '-D'] + rule.split()
+ cmd = ['ebtables', '-t', table, '-D'] + rule.split()
_execute(*cmd, check_exit_code=False, run_as_root=True)
@@ -1488,6 +1509,24 @@ def remove_isolate_dhcp_address(interface, address):
% (interface, address), top=True)
+def get_gateway_rules(bridge):
+ interfaces = CONF.forward_bridge_interface
+ if 'all' in interfaces:
+ return [('FORWARD', '-i %s -j ACCEPT' % bridge),
+ ('FORWARD', '-o %s -j ACCEPT' % bridge)]
+ rules = []
+ for iface in CONF.forward_bridge_interface:
+ if iface:
+ rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (bridge,
+ iface)))
+ rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (iface,
+ bridge)))
+ rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (bridge, bridge)))
+ rules.append(('FORWARD', '-i %s -j DROP' % bridge))
+ rules.append(('FORWARD', '-o %s -j DROP' % bridge))
+ return rules
+
+
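With the default forward_bridge_interface=['all'] the helper keeps the old blanket ACCEPT pair; naming interfaces instead yields pairwise ACCEPTs plus catch-all DROPs. For example, forward_bridge_interface=['eth0'] and bridge='br100' would produce:

    # ('FORWARD', '-i br100 -o eth0 -j ACCEPT')
    # ('FORWARD', '-i eth0 -o br100 -j ACCEPT')
    # ('FORWARD', '-i br100 -o br100 -j ACCEPT')  # intra-bridge traffic
    # ('FORWARD', '-i br100 -j DROP')             # everything else dropped
    # ('FORWARD', '-o br100 -j DROP')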
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
@@ -1526,10 +1565,8 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
else:
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
- '--in-interface %s -j ACCEPT' % bridge)
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
- '--out-interface %s -j ACCEPT' % bridge)
+ for rule in get_gateway_rules(bridge):
+ iptables_manager.ipv4['filter'].add_rule(*rule)
return dev
@@ -1564,10 +1601,8 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
'--out-interface %s -j DROP' % bridge)
return bridge
else:
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
- '--in-interface %s -j ACCEPT' % bridge)
- iptables_manager.ipv4['filter'].add_rule('FORWARD',
- '--out-interface %s -j ACCEPT' % bridge)
+ for rule in get_gateway_rules(bridge):
+ iptables_manager.ipv4['filter'].add_rule(*rule)
create_tap_dev(dev, mac_address)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 897472d08..92d016717 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -34,7 +34,6 @@ topologies. All of the network commands are issued to a subclass of
:vpn_start: First Vpn port for private networks
:cnt_vpn_clients: Number of addresses reserved for vpn clients
:network_size: Number of addresses in each private subnet
-:floating_range: Floating IP address block
:fixed_range: Fixed IP address block
:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
is disassociated
@@ -47,7 +46,6 @@ import datetime
import itertools
import math
import re
-import socket
import uuid
from eventlet import greenpool
@@ -60,6 +58,7 @@ from nova import ipv6
from nova import manager
from nova.network import api as network_api
from nova.network import driver
+from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.openstack.common import cfg
@@ -68,18 +67,14 @@ from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
-from nova.openstack.common.notifier import api as notifier
-from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
-from nova import quota
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
-QUOTAS = quota.QUOTAS
network_opts = [
cfg.StrOpt('flat_network_bridge',
@@ -109,18 +104,9 @@ network_opts = [
cfg.IntOpt('vpn_start',
default=1000,
help='First Vpn port for private networks'),
- cfg.BoolOpt('multi_host',
- default=False,
- help='Default value for multi_host in networks'),
cfg.IntOpt('network_size',
default=256,
help='Number of addresses in each private subnet'),
- cfg.StrOpt('floating_range',
- default='4.4.4.0/24',
- help='Floating IP address block'),
- cfg.StrOpt('default_floating_pool',
- default='nova',
- help='Default pool for floating ips'),
cfg.StrOpt('fixed_range',
default='10.0.0.0/8',
help='Fixed IP address block'),
@@ -142,9 +128,6 @@ network_opts = [
cfg.IntOpt('create_unique_mac_address_attempts',
default=5,
help='Number of attempts to create unique mac address'),
- cfg.BoolOpt('auto_assign_floating_ip',
- default=False,
- help='Autoassigning floating ip to VM'),
cfg.BoolOpt('fake_network',
default=False,
help='If passed, use fake network devices and addresses'),
@@ -178,21 +161,13 @@ network_opts = [
cfg.StrOpt('l3_lib',
default='nova.network.l3.LinuxNetL3',
help="Indicates underlying L3 management library"),
- cfg.StrOpt('instance_dns_manager',
- default='nova.network.noop_dns_driver.NoopDNSDriver',
- help='full class name for the DNS Manager for instance IPs'),
- cfg.StrOpt('instance_dns_domain',
- default='',
- help='full class name for the DNS Zone for instance IPs'),
- cfg.StrOpt('floating_ip_dns_manager',
- default='nova.network.noop_dns_driver.NoopDNSDriver',
- help='full class name for the DNS Manager for floating IPs'),
]
CONF = cfg.CONF
CONF.register_opts(network_opts)
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('network_topic', 'nova.network.rpcapi')
class RPCAllocateFixedIP(object):
@@ -265,7 +240,7 @@ class RPCAllocateFixedIP(object):
if network['multi_host']:
service = self.db.service_get_by_host_and_topic(context,
host,
- 'network')
+ CONF.network_topic)
if not service or not self.servicegroup_api.service_is_up(service):
# NOTE(vish): deallocate the fixed ip locally but don't
# teardown network devices
@@ -275,601 +250,6 @@ class RPCAllocateFixedIP(object):
self.network_rpcapi.deallocate_fixed_ip(context, address, host)
-class FloatingIP(object):
- """Mixin class for adding floating IP functionality to a manager."""
-
- servicegroup_api = None
-
- def init_host_floating_ips(self):
- """Configures floating ips owned by host."""
-
- admin_context = context.get_admin_context()
- try:
- floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
- self.host)
- except exception.NotFound:
- return
-
- for floating_ip in floating_ips:
- fixed_ip_id = floating_ip.get('fixed_ip_id')
- if fixed_ip_id:
- try:
- fixed_ip_ref = self.db.fixed_ip_get(admin_context,
- fixed_ip_id)
- except exception.FixedIpNotFound:
- msg = _('Fixed ip %(fixed_ip_id)s not found') % locals()
- LOG.debug(msg)
- continue
- fixed_address = fixed_ip_ref['address']
- interface = CONF.public_interface or floating_ip['interface']
- try:
- self.l3driver.add_floating_ip(floating_ip['address'],
- fixed_address, interface)
- except exception.ProcessExecutionError:
- LOG.debug(_('Interface %(interface)s not found'), locals())
- raise exception.NoFloatingIpInterface(interface=interface)
-
- def allocate_for_instance(self, context, **kwargs):
- """Handles allocating the floating IP resources for an instance.
-
- calls super class allocate_for_instance() as well
-
- rpc.called by network_api
- """
- instance_id = kwargs.get('instance_id')
- instance_uuid = kwargs.get('instance_uuid')
- project_id = kwargs.get('project_id')
- requested_networks = kwargs.get('requested_networks')
- LOG.debug(_("floating IP allocation for instance |%s|"),
- instance_uuid=instance_uuid, context=context)
- # call the next inherited class's allocate_for_instance()
- # which is currently the NetworkManager version
- # do this first so fixed ip is already allocated
- nw_info = super(FloatingIP, self).allocate_for_instance(context,
- **kwargs)
- if CONF.auto_assign_floating_ip:
- # allocate a floating ip
- floating_address = self.allocate_floating_ip(context, project_id,
- True)
- # set auto_assigned column to true for the floating ip
- self.db.floating_ip_set_auto_assigned(context, floating_address)
-
- # get the first fixed address belonging to the instance
- fixed_ips = nw_info.fixed_ips()
- fixed_address = fixed_ips[0]['address']
-
- # associate the floating ip to fixed_ip
- self.associate_floating_ip(context,
- floating_address,
- fixed_address,
- affect_auto_assigned=True)
-
- # create a fresh set of network info that contains the floating ip
- nw_info = self.get_instance_nw_info(context, **kwargs)
-
- return nw_info
-
- def deallocate_for_instance(self, context, **kwargs):
- """Handles deallocating floating IP resources for an instance.
-
- calls super class deallocate_for_instance() as well.
-
- rpc.called by network_api
- """
- instance_id = kwargs.get('instance_id')
-
- # NOTE(francois.charlier): in some cases the instance might be
- # deleted before the IPs are released, so we need to get deleted
- # instances too
- instance = self.db.instance_get(
- context.elevated(read_deleted='yes'), instance_id)
-
- try:
- fixed_ips = self.db.fixed_ip_get_by_instance(context,
- instance['uuid'])
- except exception.FixedIpNotFoundForInstance:
- fixed_ips = []
- # add to kwargs so we can pass to super to save a db lookup there
- kwargs['fixed_ips'] = fixed_ips
- for fixed_ip in fixed_ips:
- fixed_id = fixed_ip['id']
- floating_ips = self.db.floating_ip_get_by_fixed_ip_id(context,
- fixed_id)
- # disassociate floating ips related to fixed_ip
- for floating_ip in floating_ips:
- address = floating_ip['address']
- try:
- self.disassociate_floating_ip(context,
- address,
- affect_auto_assigned=True)
- except exception.FloatingIpNotAssociated:
- LOG.exception(_("Floating IP is not associated. Ignore."))
- # deallocate if auto_assigned
- if floating_ip['auto_assigned']:
- self.deallocate_floating_ip(context, address,
- affect_auto_assigned=True)
-
- # call the next inherited class's deallocate_for_instance()
- # which is currently the NetworkManager version
- # call this after so floating IPs are handled first
- super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
-
- def _floating_ip_owned_by_project(self, context, floating_ip):
- """Raises if floating ip does not belong to project."""
- if context.is_admin:
- return
-
- if floating_ip['project_id'] != context.project_id:
- if floating_ip['project_id'] is None:
- LOG.warn(_('Address |%(address)s| is not allocated'),
- {'address': floating_ip['address']})
- raise exception.NotAuthorized()
- else:
- LOG.warn(_('Address |%(address)s| is not allocated to your '
- 'project |%(project)s|'),
- {'address': floating_ip['address'],
- 'project': context.project_id})
- raise exception.NotAuthorized()
-
- def allocate_floating_ip(self, context, project_id, auto_assigned=False,
- pool=None):
- """Gets a floating ip from the pool."""
- # NOTE(tr3buchet): all network hosts in zone now use the same pool
- pool = pool or CONF.default_floating_pool
- use_quota = not auto_assigned
-
- # Check the quota; can't put this in the API because we get
- # called into from other places
- try:
- if use_quota:
- reservations = QUOTAS.reserve(context, floating_ips=1)
- except exception.OverQuota:
- pid = context.project_id
- LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate "
- "floating IP") % locals())
- raise exception.FloatingIpLimitExceeded()
-
- try:
- floating_ip = self.db.floating_ip_allocate_address(context,
- project_id,
- pool)
- payload = dict(project_id=project_id, floating_ip=floating_ip)
- notifier.notify(context,
- notifier.publisher_id("network"),
- 'network.floating_ip.allocate',
- notifier.INFO, payload)
-
- # Commit the reservations
- if use_quota:
- QUOTAS.commit(context, reservations)
- except Exception:
- with excutils.save_and_reraise_exception():
- if use_quota:
- QUOTAS.rollback(context, reservations)
-
- return floating_ip
-
- @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
- def deallocate_floating_ip(self, context, address,
- affect_auto_assigned=False):
- """Returns a floating ip to the pool."""
- floating_ip = self.db.floating_ip_get_by_address(context, address)
-
- # handle auto_assigned
- if not affect_auto_assigned and floating_ip.get('auto_assigned'):
- return
- use_quota = not floating_ip.get('auto_assigned')
-
- # make sure project owns this floating ip (allocated)
- self._floating_ip_owned_by_project(context, floating_ip)
-
- # make sure floating ip is not associated
- if floating_ip['fixed_ip_id']:
- floating_address = floating_ip['address']
- raise exception.FloatingIpAssociated(address=floating_address)
-
- # clean up any associated DNS entries
- self._delete_all_entries_for_ip(context,
- floating_ip['address'])
- payload = dict(project_id=floating_ip['project_id'],
- floating_ip=floating_ip['address'])
- notifier.notify(context,
- notifier.publisher_id("network"),
- 'network.floating_ip.deallocate',
- notifier.INFO, payload=payload)
-
- # Get reservations...
- try:
- if use_quota:
- reservations = QUOTAS.reserve(context, floating_ips=-1)
- else:
- reservations = None
- except Exception:
- reservations = None
- LOG.exception(_("Failed to update usages deallocating "
- "floating IP"))
-
- self.db.floating_ip_deallocate(context, address)
-
- # Commit the reservations
- if reservations:
- QUOTAS.commit(context, reservations)
-
- @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
- def associate_floating_ip(self, context, floating_address, fixed_address,
- affect_auto_assigned=False):
- """Associates a floating ip with a fixed ip.
-
- Makes sure everything makes sense then calls _associate_floating_ip,
- rpc'ing to correct host if i'm not it.
- """
- floating_ip = self.db.floating_ip_get_by_address(context,
- floating_address)
- # handle auto_assigned
- if not affect_auto_assigned and floating_ip.get('auto_assigned'):
- return
-
- # make sure project owns this floating ip (allocated)
- self._floating_ip_owned_by_project(context, floating_ip)
-
- # disassociate any already associated
- orig_instance_uuid = None
- if floating_ip['fixed_ip_id']:
- # find previously associated instance
- fixed_ip = self.db.fixed_ip_get(context,
- floating_ip['fixed_ip_id'])
- if fixed_ip['address'] == fixed_address:
- # NOTE(vish): already associated to this address
- return
- orig_instance_uuid = fixed_ip['instance_uuid']
-
- self.disassociate_floating_ip(context, floating_address)
-
- fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_address)
-
- # send to correct host, unless i'm the correct host
- network = self._get_network_by_id(context.elevated(),
- fixed_ip['network_id'])
- if network['multi_host']:
- instance = self.db.instance_get_by_uuid(context,
- fixed_ip['instance_uuid'])
- host = instance['host']
- else:
- host = network['host']
-
- interface = floating_ip.get('interface')
- if host == self.host:
- # i'm the correct host
- self._associate_floating_ip(context, floating_address,
- fixed_address, interface,
- fixed_ip['instance_uuid'])
- else:
- # send to correct host
- self.network_rpcapi._associate_floating_ip(context,
- floating_address, fixed_address, interface, host,
- fixed_ip['instance_uuid'])
-
- return orig_instance_uuid
-
- def _associate_floating_ip(self, context, floating_address, fixed_address,
- interface, instance_uuid):
- """Performs db and driver calls to associate floating ip & fixed ip."""
- interface = CONF.public_interface or interface
-
- @lockutils.synchronized(unicode(floating_address), 'nova-')
- def do_associate():
- # associate floating ip
- res = self.db.floating_ip_fixed_ip_associate(context,
- floating_address,
- fixed_address,
- self.host)
- if not res:
- # NOTE(vish): ip was already associated
- return
- try:
- # gogo driver time
- self.l3driver.add_floating_ip(floating_address, fixed_address,
- interface)
- except exception.ProcessExecutionError as e:
- self.db.floating_ip_disassociate(context, floating_address)
- if "Cannot find device" in str(e):
- LOG.error(_('Interface %(interface)s not found'), locals())
- raise exception.NoFloatingIpInterface(interface=interface)
-
- payload = dict(project_id=context.project_id,
- instance_id=instance_uuid,
- floating_ip=floating_address)
- notifier.notify(context,
- notifier.publisher_id("network"),
- 'network.floating_ip.associate',
- notifier.INFO, payload=payload)
- do_associate()
-
- @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
- def disassociate_floating_ip(self, context, address,
- affect_auto_assigned=False):
- """Disassociates a floating ip from its fixed ip.
-
- Makes sure everything makes sense then calls _disassociate_floating_ip,
- rpc'ing to correct host if i'm not it.
- """
- floating_ip = self.db.floating_ip_get_by_address(context, address)
-
- # handle auto assigned
- if not affect_auto_assigned and floating_ip.get('auto_assigned'):
- raise exception.CannotDisassociateAutoAssignedFloatingIP()
-
- # make sure project owns this floating ip (allocated)
- self._floating_ip_owned_by_project(context, floating_ip)
-
- # make sure floating ip is associated
- if not floating_ip.get('fixed_ip_id'):
- floating_address = floating_ip['address']
- raise exception.FloatingIpNotAssociated(address=floating_address)
-
- fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id'])
-
- # send to correct host, unless i'm the correct host
- network = self._get_network_by_id(context, fixed_ip['network_id'])
- interface = floating_ip.get('interface')
- if network['multi_host']:
- instance = self.db.instance_get_by_uuid(context,
- fixed_ip['instance_uuid'])
- service = self.db.service_get_by_host_and_topic(
- context.elevated(), instance['host'], 'network')
- if service and self.servicegroup_api.service_is_up(service):
- host = instance['host']
- else:
- # NOTE(vish): if the service is down just deallocate the data
- # locally. Set the host to local so the call will
- # not go over rpc and set interface to None so the
- # teardown in the driver does not happen.
- host = self.host
- interface = None
- else:
- host = network['host']
-
- if host == self.host:
- # i'm the correct host
- self._disassociate_floating_ip(context, address, interface,
- fixed_ip['instance_uuid'])
- else:
- # send to correct host
- self.network_rpcapi._disassociate_floating_ip(context, address,
- interface, host, fixed_ip['instance_uuid'])
-
- def _disassociate_floating_ip(self, context, address, interface,
- instance_uuid):
- """Performs db and driver calls to disassociate floating ip."""
- interface = CONF.public_interface or interface
-
- @lockutils.synchronized(unicode(address), 'nova-')
- def do_disassociate():
- # NOTE(vish): Note that we are disassociating in the db before we
- # actually remove the ip address on the host. We are
- # safe from races on this host due to the decorator,
- # but another host might grab the ip right away. We
- # don't worry about this case because the minuscule
- # window where the ip is on both hosts shouldn't cause
- # any problems.
- fixed_address = self.db.floating_ip_disassociate(context, address)
-
- if not fixed_address:
- # NOTE(vish): ip was already disassociated
- return
- if interface:
- # go go driver time
- self.l3driver.remove_floating_ip(address, fixed_address,
- interface)
- payload = dict(project_id=context.project_id,
- instance_id=instance_uuid,
- floating_ip=address)
- notifier.notify(context,
- notifier.publisher_id("network"),
- 'network.floating_ip.disassociate',
- notifier.INFO, payload=payload)
- do_disassociate()
-
- @rpc_common.client_exceptions(exception.FloatingIpNotFound)
- def get_floating_ip(self, context, id):
- """Returns a floating IP as a dict."""
- return dict(self.db.floating_ip_get(context, id).iteritems())
-
- def get_floating_pools(self, context):
- """Returns list of floating pools."""
- # NOTE(maurosr) This method should be removed in future, replaced by
- # get_floating_ip_pools. See bug #1091668
- return self.get_floating_ip_pools(context)
-
- def get_floating_ip_pools(self, context):
- """Returns list of floating ip pools."""
- pools = self.db.floating_ip_get_pools(context)
- return [dict(pool.iteritems()) for pool in pools]
-
- def get_floating_ip_by_address(self, context, address):
- """Returns a floating IP as a dict."""
- return dict(self.db.floating_ip_get_by_address(context,
- address).iteritems())
-
- def get_floating_ips_by_project(self, context):
- """Returns the floating IPs allocated to a project."""
- ips = self.db.floating_ip_get_all_by_project(context,
- context.project_id)
- return [dict(ip.iteritems()) for ip in ips]
-
- def get_floating_ips_by_fixed_address(self, context, fixed_address):
- """Returns the floating IPs associated with a fixed_address."""
- floating_ips = self.db.floating_ip_get_by_fixed_address(context,
- fixed_address)
- return [floating_ip['address'] for floating_ip in floating_ips]
-
- def _is_stale_floating_ip_address(self, context, floating_ip):
- try:
- self._floating_ip_owned_by_project(context, floating_ip)
- except exception.NotAuthorized:
- return True
- return False if floating_ip.get('fixed_ip_id') else True
-
- def migrate_instance_start(self, context, instance_uuid,
- floating_addresses,
- rxtx_factor=None, project_id=None,
- source=None, dest=None):
- # We only care if floating_addresses are provided and we're
- # switching hosts
- if not floating_addresses or (source and source == dest):
- return
-
- LOG.info(_("Starting migration network for instance"
- " %(instance_uuid)s"), locals())
- for address in floating_addresses:
- floating_ip = self.db.floating_ip_get_by_address(context,
- address)
-
- if self._is_stale_floating_ip_address(context, floating_ip):
- LOG.warn(_("Floating ip address |%(address)s| no longer "
- "belongs to instance %(instance_uuid)s. Will not"
- "migrate it "), locals())
- continue
-
- interface = CONF.public_interface or floating_ip['interface']
- fixed_ip = self.db.fixed_ip_get(context,
- floating_ip['fixed_ip_id'])
- self.l3driver.remove_floating_ip(floating_ip['address'],
- fixed_ip['address'],
- interface)
-
- # NOTE(wenjianhn): Make this address will not be bound to public
- # interface when restarts nova-network on dest compute node
- self.db.floating_ip_update(context,
- floating_ip['address'],
- {'host': None})
-
- def migrate_instance_finish(self, context, instance_uuid,
- floating_addresses, host=None,
- rxtx_factor=None, project_id=None,
- source=None, dest=None):
- # We only care if floating_addresses are provided and we're
- # switching hosts
- if host and not dest:
- dest = host
- if not floating_addresses or (source and source == dest):
- return
-
- LOG.info(_("Finishing migration network for instance"
- " %(instance_uuid)s"), locals())
-
- for address in floating_addresses:
- floating_ip = self.db.floating_ip_get_by_address(context,
- address)
-
- if self._is_stale_floating_ip_address(context, floating_ip):
- LOG.warn(_("Floating ip address |%(address)s| no longer "
- "belongs to instance %(instance_uuid)s. Will not"
- "setup it."), locals())
- continue
-
- self.db.floating_ip_update(context,
- floating_ip['address'],
- {'host': dest})
-
- interface = CONF.public_interface or floating_ip['interface']
- fixed_ip = self.db.fixed_ip_get(context,
- floating_ip['fixed_ip_id'])
- self.l3driver.add_floating_ip(floating_ip['address'],
- fixed_ip['address'],
- interface)
-
- def _prepare_domain_entry(self, context, domain):
- domainref = self.db.dnsdomain_get(context, domain)
- scope = domainref['scope']
- if scope == 'private':
- av_zone = domainref['availability_zone']
- this_domain = {'domain': domain,
- 'scope': scope,
- 'availability_zone': av_zone}
- else:
- project = domainref['project_id']
- this_domain = {'domain': domain,
- 'scope': scope,
- 'project': project}
- return this_domain
-
- def get_dns_domains(self, context):
- domains = []
-
- db_domain_list = self.db.dnsdomain_list(context)
- floating_driver_domain_list = self.floating_dns_manager.get_domains()
- instance_driver_domain_list = self.instance_dns_manager.get_domains()
-
- for db_domain in db_domain_list:
- if (db_domain in floating_driver_domain_list or
- db_domain in instance_driver_domain_list):
- domain_entry = self._prepare_domain_entry(context,
- db_domain)
- if domain_entry:
- domains.append(domain_entry)
- else:
- LOG.warn(_('Database inconsistency: DNS domain |%s| is '
- 'registered in the Nova db but not visible to '
- 'either the floating or instance DNS driver. It '
- 'will be ignored.'), db_domain)
-
- return domains
-
- def add_dns_entry(self, context, address, name, dns_type, domain):
- self.floating_dns_manager.create_entry(name, address,
- dns_type, domain)
-
- def modify_dns_entry(self, context, address, name, domain):
- self.floating_dns_manager.modify_address(name, address,
- domain)
-
- def delete_dns_entry(self, context, name, domain):
- self.floating_dns_manager.delete_entry(name, domain)
-
- def _delete_all_entries_for_ip(self, context, address):
- domain_list = self.get_dns_domains(context)
- for domain in domain_list:
- names = self.get_dns_entries_by_address(context,
- address,
- domain['domain'])
- for name in names:
- self.delete_dns_entry(context, name, domain['domain'])
-
- def get_dns_entries_by_address(self, context, address, domain):
- return self.floating_dns_manager.get_entries_by_address(address,
- domain)
-
- def get_dns_entries_by_name(self, context, name, domain):
- return self.floating_dns_manager.get_entries_by_name(name,
- domain)
-
- def create_private_dns_domain(self, context, domain, av_zone):
- self.db.dnsdomain_register_for_zone(context, domain, av_zone)
- try:
- self.instance_dns_manager.create_domain(domain)
- except exception.FloatingIpDNSExists:
- LOG.warn(_('Domain |%(domain)s| already exists, '
- 'changing zone to |%(av_zone)s|.'),
- {'domain': domain, 'av_zone': av_zone})
-
- def create_public_dns_domain(self, context, domain, project):
- self.db.dnsdomain_register_for_project(context, domain, project)
- try:
- self.floating_dns_manager.create_domain(domain)
- except exception.FloatingIpDNSExists:
- LOG.warn(_('Domain |%(domain)s| already exists, '
- 'changing project to |%(project)s|.'),
- {'domain': domain, 'project': project})
-
- def delete_dns_domain(self, context, domain):
- self.db.dnsdomain_unregister(context, domain)
- self.floating_dns_manager.delete_domain(domain)
-
- def _get_project_for_domain(self, context, domain):
- return self.db.dnsdomain_project(context, domain)
-
-
class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
@@ -883,7 +263,7 @@ class NetworkManager(manager.SchedulerDependentManager):
The one at a time part is to flatten the layout to help scale
"""
- RPC_API_VERSION = '1.7'
+ RPC_API_VERSION = '1.8'
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
@@ -908,8 +288,6 @@ class NetworkManager(manager.SchedulerDependentManager):
self.network_api = network_api.API()
self.network_rpcapi = network_rpcapi.NetworkAPI()
self.security_group_api = compute_api.SecurityGroupAPI()
- self.compute_api = compute_api.API(
- security_group_api=self.security_group_api)
self.servicegroup_api = servicegroup.API()
# NOTE(tr3buchet: unless manager subclassing NetworkManager has
@@ -1101,15 +479,28 @@ class NetworkManager(manager.SchedulerDependentManager):
rxtx_factor = kwargs['rxtx_factor']
requested_networks = kwargs.get('requested_networks')
vpn = kwargs['vpn']
+ macs = kwargs['macs']
admin_context = context.elevated()
LOG.debug(_("network allocations"), instance_uuid=instance_uuid,
context=context)
networks = self._get_networks_for_instance(admin_context,
instance_id, project_id,
requested_networks=requested_networks)
- LOG.debug(_('networks retrieved for instance: |%(networks)s|'),
+ networks_list = [self._get_network_dict(network)
+ for network in networks]
+ LOG.debug(_('networks retrieved for instance: |%(networks_list)s|'),
locals(), context=context, instance_uuid=instance_uuid)
- self._allocate_mac_addresses(context, instance_uuid, networks)
+
+ try:
+ self._allocate_mac_addresses(context, instance_uuid, networks,
+ macs)
+        except Exception:
+ with excutils.save_and_reraise_exception():
+ # If we fail to allocate any one mac address, clean up all
+ # allocated VIFs
+ self.db.virtual_interface_delete_by_instance(context,
+ instance_uuid)
+
self._allocate_fixed_ips(admin_context, instance_id,
host, networks, vpn=vpn,
requested_networks=requested_networks)
@@ -1317,25 +708,43 @@ class NetworkManager(manager.SchedulerDependentManager):
return subnets
- def _allocate_mac_addresses(self, context, instance_uuid, networks):
+ def _allocate_mac_addresses(self, context, instance_uuid, networks, macs):
"""Generates mac addresses and creates vif rows in db for them."""
- for network in networks:
- self.add_virtual_interface(context, instance_uuid, network['id'])
+ # make a copy we can mutate
+ if macs is not None:
+ available_macs = set(macs)
- def add_virtual_interface(self, context, instance_uuid, network_id):
- vif = {'address': utils.generate_mac_address(),
+ for network in networks:
+ if macs is None:
+ self._add_virtual_interface(context, instance_uuid,
+ network['id'])
+ else:
+ try:
+ mac = available_macs.pop()
+ except KeyError:
+ raise exception.VirtualInterfaceCreateException()
+ self._add_virtual_interface(context, instance_uuid,
+ network['id'], mac)
+
+ def _add_virtual_interface(self, context, instance_uuid, network_id,
+ mac=None):
+ vif = {'address': mac,
'instance_uuid': instance_uuid,
'network_id': network_id,
'uuid': str(uuid.uuid4())}
- # try FLAG times to create a vif record with a unique mac_address
- for i in xrange(CONF.create_unique_mac_address_attempts):
+
+ if mac is None:
+ vif['address'] = utils.generate_mac_address()
+ attempts = CONF.create_unique_mac_address_attempts
+ else:
+ attempts = 1
+
+ for i in range(attempts):
try:
return self.db.virtual_interface_create(context, vif)
except exception.VirtualInterfaceCreateException:
vif['address'] = utils.generate_mac_address()
else:
- self.db.virtual_interface_delete_by_instance(context,
- instance_uuid)
raise exception.VirtualInterfaceMacAddressException()
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
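To make the new macs handling concrete: one caller-supplied MAC is consumed per network, and running out surfaces as the same exception the patch raises. A minimal standalone sketch of that consumption pattern (names illustrative, not part of the patch):

    # Sketch of the macs-consumption pattern in _allocate_mac_addresses.
    # The local exception class stands in for nova.exception's.
    class VirtualInterfaceCreateException(Exception):
        pass

    def pick_macs(network_ids, macs):
        """Yield (network_id, mac); mac is None when nova should generate."""
        available_macs = set(macs) if macs is not None else None
        for network_id in network_ids:
            if available_macs is None:
                yield network_id, None
            else:
                try:
                    yield network_id, available_macs.pop()
                except KeyError:
                    # more networks than caller-supplied MACs
                    raise VirtualInterfaceCreateException()

    print(list(pick_macs(['net1', 'net2'],
                         ['fa:16:3e:00:00:01', 'fa:16:3e:00:00:02'])))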
@@ -1734,10 +1143,10 @@ class NetworkManager(manager.SchedulerDependentManager):
elif fixed_range:
network = self.db.network_get_by_cidr(elevated, fixed_range)
- if require_disassociated and network.project_id is not None:
+ if require_disassociated and network['project_id'] is not None:
raise ValueError(_('Network must be disassociated from project %s'
- ' before delete') % network.project_id)
- self.db.network_delete_safe(context, network.id)
+ ' before delete') % network['project_id'])
+ self.db.network_delete_safe(context, network['id'])
@property
def _bottom_reserved_ips(self): # pylint: disable=R0201
@@ -1868,6 +1277,8 @@ class NetworkManager(manager.SchedulerDependentManager):
def get_vifs_by_instance(self, context, instance_id):
"""Returns the vifs associated with an instance."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
instance = self.db.instance_get(context, instance_id)
vifs = self.db.virtual_interface_get_by_instance(context,
instance['uuid'])
@@ -1875,6 +1286,8 @@ class NetworkManager(manager.SchedulerDependentManager):
def get_instance_id_by_floating_address(self, context, address):
"""Returns the instance id a floating ip's fixed ip is allocated to."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address)
if fixed_ip is None:
return None
@@ -1882,10 +1295,14 @@ class NetworkManager(manager.SchedulerDependentManager):
return fixed_ip['instance_uuid']
def get_network(self, context, network_uuid):
+ # NOTE(vish): used locally
+
network = self.db.network_get_by_uuid(context.elevated(), network_uuid)
return jsonutils.to_primitive(network)
def get_all_networks(self, context):
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
try:
networks = self.db.network_get_all(context)
except exception.NoNetworksFound:
@@ -1893,20 +1310,28 @@ class NetworkManager(manager.SchedulerDependentManager):
return [jsonutils.to_primitive(network) for network in networks]
def disassociate_network(self, context, network_uuid):
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
network = self.get_network(context, network_uuid)
self.db.network_disassociate(context, network['id'])
def get_fixed_ip(self, context, id):
"""Return a fixed ip."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
fixed = self.db.fixed_ip_get(context, id)
return jsonutils.to_primitive(fixed)
def get_fixed_ip_by_address(self, context, address):
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
fixed = self.db.fixed_ip_get_by_address(context, address)
return jsonutils.to_primitive(fixed)
def get_vif_by_mac_address(self, context, mac_address):
"""Returns the vifs record for the mac_address."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
return self.db.virtual_interface_get_by_address(context,
mac_address)
@@ -2006,6 +1431,8 @@ class FlatManager(NetworkManager):
def get_floating_ip(self, context, id):
"""Returns a floating IP as a dict."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
return None
def get_floating_pools(self, context):
@@ -2016,18 +1443,26 @@ class FlatManager(NetworkManager):
def get_floating_ip_pools(self, context):
"""Returns list of floating ip pools."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
return {}
def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
return None
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
return []
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
return []
def migrate_instance_start(self, context, instance_uuid,
@@ -2047,7 +1482,8 @@ class FlatManager(NetworkManager):
pass
-class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
+class FlatDHCPManager(RPCAllocateFixedIP, floating_ips.FloatingIP,
+ NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
@@ -2110,7 +1546,7 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
return network_dict
-class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
+class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager):
"""Vlan network with dhcp.
VlanManager is the most complicated. It will create a host-managed
@@ -2192,6 +1628,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
def associate(self, context, network_uuid, associations):
"""Associate or disassociate host or project to network."""
+ # NOTE(vish): This is no longer used but can't be removed until
+ # we major version the network_rpcapi to 2.0.
network_id = self.get_network(context, network_uuid)['id']
if 'host' in associations:
host = associations['host']
diff --git a/nova/network/model.py b/nova/network/model.py
index 9accb883e..bfd4639ee 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -204,10 +204,29 @@ class Network(Model):
return network
+class VIF8021QbgParams(Model):
+ """Represents the parameters for a 802.1qbg VIF."""
+
+ def __init__(self, managerid, typeid, typeidversion, instanceid):
+ self['managerid'] = managerid
+ self['typeid'] = typeid
+ self['typeidversion'] = typeidversion
+ self['instanceid'] = instanceid
+
+
+class VIF8021QbhParams(Model):
+ """Represents the parameters for a 802.1qbh VIF."""
+
+ def __init__(self, profileid):
+ self['profileid'] = profileid
+
+
class VIF(Model):
"""Represents a Virtual Interface in Nova."""
def __init__(self, id=None, address=None, network=None, type=None,
- devname=None, ovs_interfaceid=None, **kwargs):
+ devname=None, ovs_interfaceid=None,
+ qbh_params=None, qbg_params=None,
+ **kwargs):
super(VIF, self).__init__()
self['id'] = id
@@ -217,6 +236,8 @@ class VIF(Model):
self['devname'] = devname
self['ovs_interfaceid'] = ovs_interfaceid
+ self['qbh_params'] = qbh_params
+ self['qbg_params'] = qbg_params
self._set_meta(kwargs)
@@ -384,6 +405,8 @@ class NetworkInfo(list):
'vif_devname': vif.get('devname'),
'vif_uuid': vif['id'],
'ovs_interfaceid': vif.get('ovs_interfaceid'),
+ 'qbh_params': vif.get('qbh_params'),
+ 'qbg_params': vif.get('qbg_params'),
'rxtx_cap': vif.get_meta('rxtx_cap', 0),
'dns': [get_ip(ip) for ip in subnet_v4['dns']],
'ips': [fixed_ip_dict(ip, subnet)
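A hedged sketch of how the new parameter models attach to a VIF; the import path comes from the file above, the values are made up:

    # Illustrative wiring of the new 802.1qbg parameters into a VIF.
    from nova.network.model import VIF, VIF8021QbgParams

    qbg = VIF8021QbgParams(managerid='1', typeid='8906', typeidversion='1',
                           instanceid='09b11c53-8b5c-4eeb-8f00-000000000001')
    vif = VIF(id='a-vif-uuid', address='fa:16:3e:00:00:01', qbg_params=qbg)
    assert vif['qbg_params']['typeid'] == '8906'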
diff --git a/nova/network/quantumv2/__init__.py b/nova/network/quantumv2/__init__.py
index 914600ed8..1b7381e8e 100644
--- a/nova/network/quantumv2/__init__.py
+++ b/nova/network/quantumv2/__init__.py
@@ -30,10 +30,12 @@ def _get_auth_token():
httpclient = client.HTTPClient(
username=CONF.quantum_admin_username,
tenant_name=CONF.quantum_admin_tenant_name,
+ region_name=CONF.quantum_region_name,
password=CONF.quantum_admin_password,
auth_url=CONF.quantum_admin_auth_url,
timeout=CONF.quantum_url_timeout,
- auth_strategy=CONF.quantum_auth_strategy)
+ auth_strategy=CONF.quantum_auth_strategy,
+ insecure=CONF.quantum_api_insecure)
httpclient.authenticate()
except Exception:
with excutils.save_and_reraise_exception():
@@ -47,6 +49,7 @@ def _get_client(token=None):
params = {
'endpoint_url': CONF.quantum_url,
'timeout': CONF.quantum_url_timeout,
+ 'insecure': CONF.quantum_api_insecure,
}
if token:
params['token'] = token
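Both new options feed straight into the client constructors above. A hypothetical nova.conf fragment (values made up):

    [DEFAULT]
    # pick the quantum endpoint for this region; unset means no region filter
    quantum_region_name = RegionOne
    # skip SSL certificate validation when talking to quantum
    quantum_api_insecure = True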
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 704ed5cef..420396ed4 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -16,7 +16,7 @@
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-from nova.compute import api as compute_api
+from nova import conductor
from nova.db import base
from nova import exception
from nova.network import api as network_api
@@ -38,12 +38,18 @@ quantum_opts = [
cfg.StrOpt('quantum_admin_username',
help='username for connecting to quantum in admin context'),
cfg.StrOpt('quantum_admin_password',
- help='password for connecting to quantum in admin context'),
+ help='password for connecting to quantum in admin context',
+ secret=True),
cfg.StrOpt('quantum_admin_tenant_name',
help='tenant name for connecting to quantum in admin context'),
+ cfg.StrOpt('quantum_region_name',
+ help='region name for connecting to quantum in admin context'),
cfg.StrOpt('quantum_admin_auth_url',
default='http://localhost:5000/v2.0',
help='auth url for connecting to quantum in admin context'),
+ cfg.BoolOpt('quantum_api_insecure',
+ default=False,
+ help='if set, ignore any SSL validation issues'),
cfg.StrOpt('quantum_auth_strategy',
default='keystone',
help='auth strategy for connecting to '
@@ -57,7 +63,7 @@ quantum_opts = [
CONF = cfg.CONF
CONF.register_opts(quantum_opts)
-CONF.import_opt('default_floating_pool', 'nova.network.manager')
+CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
LOG = logging.getLogger(__name__)
NET_EXTERNAL = 'router:external'
@@ -69,7 +75,7 @@ update_instance_info_cache = network_api.update_instance_cache_with_nw_info
class API(base.Base):
"""API for interacting with the quantum 2.x API."""
- security_group_api = compute_api.SecurityGroupAPI()
+ conductor_api = conductor.API()
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
@@ -205,7 +211,8 @@ class API(base.Base):
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_add_security_group_refresh(context, instance)
- return self.get_instance_nw_info(context, instance, networks=nets)
+ return self.get_instance_nw_info(context, instance, networks=nets,
+ conductor_api=kwargs.get('conductor_api'))
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
@@ -224,10 +231,10 @@ class API(base.Base):
self.trigger_instance_remove_security_group_refresh(context, instance)
def get_instance_nw_info(self, context, instance, networks=None,
- update_cache=True):
+ conductor_api=None):
result = self._get_instance_nw_info(context, instance, networks)
- if update_cache:
- update_instance_info_cache(self, context, instance, result)
+ update_instance_info_cache(self, context, instance, result,
+ conductor_api)
return result
def _get_instance_nw_info(self, context, instance, networks=None):
@@ -236,7 +243,8 @@ class API(base.Base):
nw_info = self._build_network_info_model(context, instance, networks)
return network_model.NetworkInfo.hydrate(nw_info)
- def add_fixed_ip_to_instance(self, context, instance, network_id):
+ def add_fixed_ip_to_instance(self, context, instance, network_id,
+ conductor_api=None):
"""Add a fixed ip to the instance from specified network."""
search_opts = {'network_id': network_id}
data = quantumv2.get_client(context).list_subnets(**search_opts)
@@ -268,7 +276,8 @@ class API(base.Base):
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
- def remove_fixed_ip_from_instance(self, context, instance, address):
+ def remove_fixed_ip_from_instance(self, context, instance, address,
+ conductor_api=None):
"""Remove a fixed ip from the instance."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
@@ -358,27 +367,25 @@ class API(base.Base):
instance_ref):
admin_context = context.elevated()
for group in instance_ref['security_groups']:
- self.security_group_api.trigger_handler(
- 'instance_add_security_group', context, instance_ref,
- group['name'])
+ self.conductor_api.security_groups_trigger_handler(context,
+ 'instance_add_security_group', instance_ref, group['name'])
def trigger_instance_remove_security_group_refresh(self, context,
instance_ref):
admin_context = context.elevated()
for group in instance_ref['security_groups']:
- self.security_group_api.trigger_handler(
- 'instance_remove_security_group', context, instance_ref,
- group['name'])
+ self.conductor_api.security_groups_trigger_handler(context,
+ 'instance_remove_security_group', instance_ref, group['name'])
def trigger_security_group_members_refresh(self, context, instance_ref):
admin_context = context.elevated()
group_ids = [group['id'] for group in instance_ref['security_groups']]
- self.security_group_api.trigger_members_refresh(admin_context,
- group_ids)
- self.security_group_api.trigger_handler('security_group_members',
- admin_context, group_ids)
+ self.conductor_api.security_groups_trigger_members_refresh(
+ admin_context, group_ids)
+ self.conductor_api.security_groups_trigger_handler(admin_context,
+ 'security_group_members', group_ids)
def _get_port_id_by_fixed_address(self, client,
instance, address):
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index a7bffe17a..5c11f956f 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -27,6 +27,10 @@ rpcapi_opts = [
cfg.StrOpt('network_topic',
default='network',
help='the topic network nodes listen on'),
+ cfg.BoolOpt('multi_host',
+ default=False,
+ help='Default value for multi_host in networks. Also, if set, '
+                     'some rpc network calls will be sent directly to the '
+                     'host.'),
]
CONF = cfg.CONF
@@ -46,6 +50,7 @@ class NetworkAPI(rpc_proxy.RpcProxy):
1.5 - Adds associate
1.6 - Adds instance_uuid to _{dis,}associate_floating_ip
1.7 - Adds method get_floating_ip_pools to replace get_floating_pools
+ 1.8 - Adds macs to allocate_for_instance
'''
#
@@ -151,15 +156,25 @@ class NetworkAPI(rpc_proxy.RpcProxy):
def allocate_for_instance(self, ctxt, instance_id, instance_uuid,
project_id, host, rxtx_factor, vpn,
- requested_networks):
+ requested_networks, macs=None):
+ if CONF.multi_host:
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
+ else:
+ topic = None
return self.call(ctxt, self.make_msg('allocate_for_instance',
instance_id=instance_id, instance_uuid=instance_uuid,
project_id=project_id, host=host, rxtx_factor=rxtx_factor,
- vpn=vpn, requested_networks=requested_networks))
+ vpn=vpn, requested_networks=requested_networks, macs=macs),
+ topic=topic, version='1.8')
def deallocate_for_instance(self, ctxt, instance_id, project_id, host):
+ if CONF.multi_host:
+ topic = rpc.queue_get_for(ctxt, self.topic, host)
+ else:
+ topic = None
return self.call(ctxt, self.make_msg('deallocate_for_instance',
- instance_id=instance_id, project_id=project_id, host=host))
+ instance_id=instance_id, project_id=project_id, host=host),
+ topic=topic)
def add_fixed_ip_to_instance(self, ctxt, instance_id, host, network_id):
return self.call(ctxt, self.make_msg('add_fixed_ip_to_instance',
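The multi_host branch above targets the network service on a specific host instead of the shared topic queue. A standalone sketch of the topic selection, assuming queue_get_for's conventional '<topic>.<host>' format:

    # Sketch: topic routing for allocate/deallocate under multi_host.
    def pick_topic(multi_host, base_topic, host):
        if multi_host:
            return '%s.%s' % (base_topic, host)  # host-specific queue
        return None  # None means the proxy's default shared topic

    assert pick_topic(True, 'network', 'host1') == 'network.host1'
    assert pick_topic(False, 'network', 'host1') is None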
diff --git a/nova/notifications.py b/nova/notifications.py
index 65428d03f..f40fff7f2 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -21,7 +21,6 @@ the system.
import nova.context
from nova import db
-from nova import exception
from nova.image import glance
from nova import network
from nova.network import model as network_model
diff --git a/nova/virt/hyperv/ioutils.py b/nova/openstack/common/db/__init__.py
index d927e317f..1b9b60dec 100644
--- a/nova/virt/hyperv/ioutils.py
+++ b/nova/openstack/common/db/__init__.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2012 Cloudbase Solutions Srl
+# Copyright 2012 Cloudscaling Group, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,13 +14,3 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
-"""
-Utility class to ease the task of creating stubs of built in IO functions.
-"""
-
-import __builtin__
-
-
-def open(name, mode):
- return __builtin__.open(name, mode)
diff --git a/nova/openstack/common/db/sqlalchemy/__init__.py b/nova/openstack/common/db/sqlalchemy/__init__.py
new file mode 100644
index 000000000..1b9b60dec
--- /dev/null
+++ b/nova/openstack/common/db/sqlalchemy/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Cloudscaling Group, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/openstack/common/db/sqlalchemy/models.py b/nova/openstack/common/db/sqlalchemy/models.py
new file mode 100644
index 000000000..87ec7ccc3
--- /dev/null
+++ b/nova/openstack/common/db/sqlalchemy/models.py
@@ -0,0 +1,103 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
+# Copyright 2012 Cloudscaling Group, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+SQLAlchemy models.
+"""
+
+from sqlalchemy import Column, Integer
+from sqlalchemy import DateTime
+from sqlalchemy.orm import object_mapper
+
+from nova.openstack.common.db.sqlalchemy.session import get_session
+from nova.openstack.common import timeutils
+
+
+class ModelBase(object):
+ """Base class for models."""
+ __table_initialized__ = False
+ created_at = Column(DateTime, default=timeutils.utcnow)
+ updated_at = Column(DateTime, onupdate=timeutils.utcnow)
+ metadata = None
+
+ def save(self, session=None):
+ """Save this object."""
+ if not session:
+ session = get_session()
+        # NOTE(boris-42): This part of the code should look like:
+        #                       session.add(self)
+        #                       session.flush()
+        #                       But there is a bug in sqlalchemy and eventlet
+        #                       that raises a NoneType exception if there is
+        #                       no running transaction and rollback is called.
+        #                       As long as sqlalchemy has this bug we have to
+        #                       create the transaction explicitly.
+ with session.begin(subtransactions=True):
+ session.add(self)
+ session.flush()
+
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def get(self, key, default=None):
+ return getattr(self, key, default)
+
+ def __iter__(self):
+ columns = dict(object_mapper(self).columns).keys()
+ # NOTE(russellb): Allow models to specify other keys that can be looked
+ # up, beyond the actual db columns. An example would be the 'name'
+ # property for an Instance.
+ if hasattr(self, '_extra_keys'):
+ columns.extend(self._extra_keys())
+ self._i = iter(columns)
+ return self
+
+ def next(self):
+ n = self._i.next()
+ return n, getattr(self, n)
+
+ def update(self, values):
+ """Make the model object behave like a dict."""
+ for k, v in values.iteritems():
+ setattr(self, k, v)
+
+ def iteritems(self):
+ """Make the model object behave like a dict.
+
+ Includes attributes from joins."""
+ local = dict(self)
+ joined = dict([(k, v) for k, v in self.__dict__.iteritems()
+ if not k[0] == '_'])
+ local.update(joined)
+ return local.iteritems()
+
+
+class SoftDeleteMixin(object):
+ deleted_at = Column(DateTime)
+ deleted = Column(Integer, default=0)
+
+ def soft_delete(self, session=None):
+ """Mark this object as deleted."""
+ self.deleted = self.id
+ self.deleted_at = timeutils.utcnow()
+ self.save(session=session)
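A runnable sketch of what SoftDeleteMixin-style deletion looks like in practice, using plain SQLAlchemy with the mixin columns inlined; it deliberately avoids the nova session machinery:

    import datetime

    from sqlalchemy import Column, DateTime, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Instance(Base):
        __tablename__ = 'instances'
        id = Column(Integer, primary_key=True)
        name = Column(String(255))
        deleted_at = Column(DateTime)          # from SoftDeleteMixin
        deleted = Column(Integer, default=0)   # from SoftDeleteMixin

        def soft_delete(self, session):
            self.deleted = self.id             # mark, don't DELETE
            self.deleted_at = datetime.datetime.utcnow()
            session.add(self)
            session.flush()

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    inst = Instance(name='vm1')
    session.add(inst)
    session.flush()
    inst.soft_delete(session)
    assert inst.deleted == inst.id and inst.deleted_at is not None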
diff --git a/nova/db/sqlalchemy/session.py b/nova/openstack/common/db/sqlalchemy/session.py
index eb5d8016f..bc889fc36 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/openstack/common/db/sqlalchemy/session.py
@@ -18,6 +18,16 @@
"""Session Handling for SQLAlchemy backend.
+Initializing:
+
+* Call set_defaults with at least the following kwargs:
+ sql_connection, sqlite_db
+
+ Example:
+
+  session.set_defaults(sql_connection="sqlite:////var/lib/nova/sqlite.db",
+                       sqlite_db="/var/lib/nova/sqlite.db")
+
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
@@ -159,6 +169,15 @@ There are some things which it is best to avoid:
proper UNIQUE constraints are added to the tables.
+Enabling soft deletes:
+
+* To use/enable soft-deletes, the SoftDeleteMixin must be added
+ to your model class. For example:
+
+ class NovaBase(models.SoftDeleteMixin, models.ModelBase):
+ pass
+
+
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
@@ -221,31 +240,33 @@ Efficient use of soft deletes:
# This will produce count(bar_refs) db requests.
"""
+import os.path
import re
import time
from eventlet import db_pool
from eventlet import greenthread
-try:
- import MySQLdb
-except ImportError:
- MySQLdb = None
from sqlalchemy.exc import DisconnectionError, OperationalError, IntegrityError
import sqlalchemy.interfaces
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
-import nova.exception
from nova.openstack.common import cfg
-import nova.openstack.common.log as logging
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
-from nova import paths
+MySQLdb = importutils.try_import('MySQLdb')
+if MySQLdb is not None:
+ from MySQLdb.constants import CLIENT as mysql_client_constants
sql_opts = [
cfg.StrOpt('sql_connection',
- default='sqlite:///' + paths.state_path_def('$sqlite_db'),
+ default='sqlite:///' +
+ os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database'),
cfg.StrOpt('sqlite_db',
@@ -260,11 +281,11 @@ sql_opts = [
cfg.IntOpt('sql_min_pool_size',
default=1,
help='Minimum number of SQL connections to keep open in a '
- 'pool'),
+ 'pool'),
cfg.IntOpt('sql_max_pool_size',
default=5,
help='Maximum number of SQL connections to keep open in a '
- 'pool'),
+ 'pool'),
cfg.IntOpt('sql_max_retries',
default=10,
help='maximum db connection retries during startup. '
@@ -295,6 +316,13 @@ _ENGINE = None
_MAKER = None
+def set_defaults(sql_connection, sqlite_db):
+ """Set defaults for configuration variables."""
+ cfg.set_defaults(sql_opts,
+ sql_connection=sql_connection,
+ sqlite_db=sqlite_db)
+
+
def get_session(autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy session."""
global _MAKER
@@ -307,6 +335,25 @@ def get_session(autocommit=True, expire_on_commit=False):
return session
+class DBError(Exception):
+ """Wraps an implementation specific exception."""
+ def __init__(self, inner_exception=None):
+ self.inner_exception = inner_exception
+ super(DBError, self).__init__(str(inner_exception))
+
+
+class DBDuplicateEntry(DBError):
+ """Wraps an implementation specific exception."""
+ def __init__(self, columns=[], inner_exception=None):
+ self.columns = columns
+ super(DBDuplicateEntry, self).__init__(inner_exception)
+
+
+class InvalidUnicodeParameter(Exception):
+ message = _("Invalid Parameter: "
+ "Unicode is not supported by the current database.")
+
+
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
@@ -360,7 +407,7 @@ def raise_if_duplicate_entry_error(integrity_error, engine_name):
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
- raise nova.exception.DBDuplicateEntry(columns, integrity_error)
+ raise DBDuplicateEntry(columns, integrity_error)
def wrap_db_error(f):
@@ -368,7 +415,7 @@ def wrap_db_error(f):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
- raise nova.exception.InvalidUnicodeParameter()
+ raise InvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
@@ -379,10 +426,10 @@ def wrap_db_error(f):
# means we should get names of columns, which values violate
# unique constraint, from error message.
raise_if_duplicate_entry_error(e, get_engine().name)
- raise nova.exception.DBError(e)
+ raise DBError(e)
except Exception, e:
LOG.exception(_('DB exception wrapped.'))
- raise nova.exception.DBError(e)
+ raise DBError(e)
_wrap.func_name = f.func_name
return _wrap
@@ -471,20 +518,32 @@ def create_engine(sql_connection):
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
elif all((CONF.sql_dbpool_enable, MySQLdb,
- "mysql" in connection_dict.drivername)):
+ "mysql" in connection_dict.drivername)):
LOG.info(_("Using mysql/eventlet db_pool."))
# MySQLdb won't accept 'None' in the password field
password = connection_dict.password or ''
pool_args = {
- 'db': connection_dict.database,
- 'passwd': password,
- 'host': connection_dict.host,
- 'user': connection_dict.username,
- 'min_size': CONF.sql_min_pool_size,
- 'max_size': CONF.sql_max_pool_size,
- 'max_idle': CONF.sql_idle_timeout}
- creator = db_pool.ConnectionPool(MySQLdb, **pool_args)
- engine_args['creator'] = creator.create
+ 'db': connection_dict.database,
+ 'passwd': password,
+ 'host': connection_dict.host,
+ 'user': connection_dict.username,
+ 'min_size': CONF.sql_min_pool_size,
+ 'max_size': CONF.sql_max_pool_size,
+ 'max_idle': CONF.sql_idle_timeout,
+ 'client_flag': mysql_client_constants.FOUND_ROWS}
+
+ pool = db_pool.ConnectionPool(MySQLdb, **pool_args)
+
+ def creator():
+ conn = pool.create()
+ if isinstance(conn, tuple):
+ # NOTE(belliott) eventlet >= 0.10 returns a tuple
+ now, now, conn = conn
+
+ return conn
+
+ engine_args['creator'] = creator
+
else:
engine_args['pool_size'] = CONF.sql_max_pool_size
if CONF.sql_max_overflow is not None:
@@ -526,7 +585,7 @@ def create_engine(sql_connection):
break
except OperationalError, e:
if (remaining != 'infinite' and remaining == 0) or \
- not is_db_connection_error(e.args[0]):
+ not is_db_connection_error(e.args[0]):
raise
return engine
@@ -550,6 +609,10 @@ class Session(sqlalchemy.orm.session.Session):
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
+ @wrap_db_error
+ def execute(self, *args, **kwargs):
+ return super(Session, self).execute(*args, **kwargs)
+
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
@@ -581,15 +644,15 @@ def patch_mysqldb_with_stacktrace_comments():
continue
if file.endswith('exception.py') and method == '_wrap':
continue
- # nova/db/api is just a wrapper around nova/db/sqlalchemy/api
- if file.endswith('nova/db/api.py'):
+ # db/api is just a wrapper around db/sqlalchemy/api
+ if file.endswith('db/api.py'):
continue
# only trace inside nova
index = file.rfind('nova')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
- % (file[index:], line, method, function)
+ % (file[index:], line, method, function)
# strip trailing " | " from stack
if stack:
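With the exceptions relocated, callers catch this module's DBError/DBDuplicateEntry instead of nova.exception's. A standalone sketch of the wrap_db_error pattern (simplified: the real decorator also special-cases IntegrityError and UnicodeEncodeError):

    class DBError(Exception):
        def __init__(self, inner_exception=None):
            self.inner_exception = inner_exception
            super(DBError, self).__init__(str(inner_exception))

    def wrap_db_error(f):
        def _wrap(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception as e:
                raise DBError(e)   # everything backend-specific gets wrapped
        return _wrap

    @wrap_db_error
    def flaky_query():
        raise RuntimeError('connection lost')

    try:
        flaky_query()
    except DBError as e:
        print('wrapped: %r' % e.inner_exception)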
diff --git a/nova/common/sqlalchemyutils.py b/nova/openstack/common/db/sqlalchemy/utils.py
index a186948ac..ef8af57ce 100644
--- a/nova/common/sqlalchemyutils.py
+++ b/nova/openstack/common/db/sqlalchemy/utils.py
@@ -22,13 +22,17 @@
import sqlalchemy
-from nova import exception
+from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
+class InvalidSortKey(Exception):
+ message = _("Sort key supplied was not valid.")
+
+
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
@@ -89,7 +93,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
- raise exception.InvalidSortKey()
+ raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
diff --git a/nova/openstack/common/importutils.py b/nova/openstack/common/importutils.py
index f45372b4d..9dec764fb 100644
--- a/nova/openstack/common/importutils.py
+++ b/nova/openstack/common/importutils.py
@@ -29,7 +29,7 @@ def import_class(import_str):
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
- except (ValueError, AttributeError), exc:
+ except (ValueError, AttributeError):
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
@@ -57,3 +57,11 @@ def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
+
+
+def try_import(import_str, default=None):
+ """Try to import a module and if it fails return default."""
+ try:
+ return import_module(import_str)
+ except ImportError:
+ return default
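try_import replaces the try/except ImportError boilerplate; the session.py changes above already use it for the optional MySQLdb dependency:

    # The exact pattern session.py now uses for an optional dependency.
    from nova.openstack.common import importutils

    MySQLdb = importutils.try_import('MySQLdb')
    if MySQLdb is None:
        print('MySQLdb not installed; mysql/eventlet pooling disabled')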
diff --git a/nova/openstack/common/jsonutils.py b/nova/openstack/common/jsonutils.py
index 5a90d5c5e..290435450 100644
--- a/nova/openstack/common/jsonutils.py
+++ b/nova/openstack/common/jsonutils.py
@@ -34,6 +34,7 @@ This module provides a few things:
import datetime
+import functools
import inspect
import itertools
import json
@@ -42,7 +43,8 @@ import xmlrpclib
from nova.openstack.common import timeutils
-def to_primitive(value, convert_instances=False, level=0):
+def to_primitive(value, convert_instances=False, convert_datetime=True,
+ level=0):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
@@ -84,6 +86,10 @@ def to_primitive(value, convert_instances=False, level=0):
# The try block may not be necessary after the class check above,
# but just in case ...
try:
+ recursive = functools.partial(to_primitive,
+ convert_instances=convert_instances,
+ convert_datetime=convert_datetime,
+ level=level)
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
@@ -91,36 +97,22 @@ def to_primitive(value, convert_instances=False, level=0):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if isinstance(value, (list, tuple)):
- o = []
- for v in value:
- o.append(to_primitive(v, convert_instances=convert_instances,
- level=level))
- return o
+ return [recursive(v) for v in value]
elif isinstance(value, dict):
- o = {}
- for k, v in value.iteritems():
- o[k] = to_primitive(v, convert_instances=convert_instances,
- level=level)
- return o
- elif isinstance(value, datetime.datetime):
+ return dict((k, recursive(v)) for k, v in value.iteritems())
+ elif convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif hasattr(value, 'iteritems'):
- return to_primitive(dict(value.iteritems()),
- convert_instances=convert_instances,
- level=level + 1)
+ return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
- return to_primitive(list(value),
- convert_instances=convert_instances,
- level=level)
+ return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
- return to_primitive(value.__dict__,
- convert_instances=convert_instances,
- level=level + 1)
+ return recursive(value.__dict__, level=level + 1)
else:
return value
- except TypeError, e:
+ except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return unicode(value)
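Because the recursion now goes through functools.partial, the convert_* flags survive into nested containers. A quick check of the new convert_datetime flag (the string form comes from timeutils.strtime):

    # The new flag propagates through nested containers via the partial.
    import datetime

    from nova.openstack.common import jsonutils

    val = {'when': [datetime.datetime(2013, 1, 1)]}
    print(jsonutils.to_primitive(val))                          # string form
    print(jsonutils.to_primitive(val, convert_datetime=False))  # datetime kept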
diff --git a/nova/openstack/common/local.py b/nova/openstack/common/local.py
index 19d962732..8bdc837a9 100644
--- a/nova/openstack/common/local.py
+++ b/nova/openstack/common/local.py
@@ -26,6 +26,9 @@ class WeakLocal(corolocal.local):
def __getattribute__(self, attr):
rval = corolocal.local.__getattribute__(self, attr)
if rval:
+ # NOTE(mikal): this bit is confusing. What is stored is a weak
+            # reference, not the value itself. We therefore need to look up
+ # the weak reference and return the inner value here.
rval = rval()
return rval
@@ -34,4 +37,12 @@ class WeakLocal(corolocal.local):
return corolocal.local.__setattr__(self, attr, value)
+# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
+
+# A "weak" store uses weak references, so an object stored there can be
+# garbage collected once the code using the thread local storage drops its
+# last reference to it. A "strong" store holds a normal reference, so the
+# object stays alive for as long as it is stored.
+weak_store = WeakLocal()
+strong_store = corolocal.local()
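A small demonstration of the difference between the two stores; Token is a throwaway class because plain object() instances cannot be weak-referenced:

    from nova.openstack.common import local

    class Token(object):
        pass

    t = Token()
    local.weak_store.data = t          # stored as a weak reference
    assert local.weak_store.data is t  # alive while t is referenced
    del t                              # last strong reference gone
    assert local.weak_store.data is None

    t2 = Token()
    local.strong_store.data = t2       # stored as a normal reference
    del t2
    assert local.strong_store.data is not None  # the store keeps it alive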
diff --git a/nova/openstack/common/lockutils.py b/nova/openstack/common/lockutils.py
index 6f80a1f67..930e265f6 100644
--- a/nova/openstack/common/lockutils.py
+++ b/nova/openstack/common/lockutils.py
@@ -29,6 +29,7 @@ from eventlet import semaphore
from nova.openstack.common import cfg
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
+from nova.openstack.common import local
from nova.openstack.common import log as logging
@@ -39,9 +40,8 @@ util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
- default=os.path.abspath(os.path.join(os.path.dirname(__file__),
- '../')),
- help='Directory to use for lock files')
+               help=('Directory to use for lock files. Defaults to a '
+                     'temp directory'))
]
@@ -140,7 +140,7 @@ def synchronized(name, lock_file_prefix, external=False, lock_path=None):
def foo(self, *args):
...
- ensures that only one thread will execute the bar method at a time.
+ ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@@ -184,54 +184,66 @@ def synchronized(name, lock_file_prefix, external=False, lock_path=None):
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
- if external and not CONF.disable_process_locking:
- LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
- 'method "%(method)s"...'),
- {'lock': name, 'method': f.__name__})
- cleanup_dir = False
-
- # We need a copy of lock_path because it is non-local
- local_lock_path = lock_path
- if not local_lock_path:
- local_lock_path = CONF.lock_path
-
- if not local_lock_path:
- cleanup_dir = True
- local_lock_path = tempfile.mkdtemp()
-
- if not os.path.exists(local_lock_path):
- cleanup_dir = True
- fileutils.ensure_tree(local_lock_path)
-
- # NOTE(mikal): the lock name cannot contain directory
- # separators
- safe_name = name.replace(os.sep, '_')
- lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
- lock_file_path = os.path.join(local_lock_path,
- lock_file_name)
-
- try:
- lock = InterProcessLock(lock_file_path)
- with lock:
- LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
- 'for method "%(method)s"...'),
+
+            # NOTE(mikal): I know this looks odd: we record which locks this
+            # thread holds so that rpc can warn when a call is made while a
+            # lock is held (see _check_for_lock in rpc/__init__.py).
+ if not hasattr(local.strong_store, 'locks_held'):
+ local.strong_store.locks_held = []
+ local.strong_store.locks_held.append(name)
+
+ try:
+ if external and not CONF.disable_process_locking:
+ LOG.debug(_('Attempting to grab file lock "%(lock)s" '
+ 'for method "%(method)s"...'),
+ {'lock': name, 'method': f.__name__})
+ cleanup_dir = False
+
+ # We need a copy of lock_path because it is non-local
+ local_lock_path = lock_path
+ if not local_lock_path:
+ local_lock_path = CONF.lock_path
+
+ if not local_lock_path:
+ cleanup_dir = True
+ local_lock_path = tempfile.mkdtemp()
+
+ if not os.path.exists(local_lock_path):
+ cleanup_dir = True
+ fileutils.ensure_tree(local_lock_path)
+
+ # NOTE(mikal): the lock name cannot contain directory
+ # separators
+ safe_name = name.replace(os.sep, '_')
+ lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
+ lock_file_path = os.path.join(local_lock_path,
+ lock_file_name)
+
+ try:
+ lock = InterProcessLock(lock_file_path)
+ with lock:
+ LOG.debug(_('Got file lock "%(lock)s" at '
+ '%(path)s for method '
+ '"%(method)s"...'),
+ {'lock': name,
+ 'path': lock_file_path,
+ 'method': f.__name__})
+ retval = f(*args, **kwargs)
+ finally:
+ LOG.debug(_('Released file lock "%(lock)s" at '
+ '%(path)s for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
- retval = f(*args, **kwargs)
- finally:
- LOG.debug(_('Released file lock "%(lock)s" at %(path)s'
- ' for method "%(method)s"...'),
- {'lock': name,
- 'path': lock_file_path,
- 'method': f.__name__})
- # NOTE(vish): This removes the tempdir if we needed
- # to create one. This is used to cleanup
- # the locks left behind by unit tests.
- if cleanup_dir:
- shutil.rmtree(local_lock_path)
- else:
- retval = f(*args, **kwargs)
+ # NOTE(vish): This removes the tempdir if we needed
+ # to create one. This is used to
+ # cleanup the locks left behind by unit
+ # tests.
+ if cleanup_dir:
+ shutil.rmtree(local_lock_path)
+ else:
+ retval = f(*args, **kwargs)
+
+ finally:
+ local.strong_store.locks_held.remove(name)
return retval
return inner
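The structural change above is the try/finally that brackets the whole body with locks_held bookkeeping. In isolation the pattern looks like this sketch, where threading.local stands in for local.strong_store and the semaphore and file lock are elided:

    import threading

    _store = threading.local()

    def synchronized(name):
        def wrap(f):
            def inner(*args, **kwargs):
                if not hasattr(_store, 'locks_held'):
                    _store.locks_held = []
                _store.locks_held.append(name)
                try:
                    return f(*args, **kwargs)
                finally:
                    _store.locks_held.remove(name)
            return inner
        return wrap

    @synchronized('iptables')
    def mutate_rules():
        print('held here: %s' % _store.locks_held)   # ['iptables']

    mutate_rules()
    print(_store.locks_held)                         # []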
diff --git a/nova/openstack/common/rpc/__init__.py b/nova/openstack/common/rpc/__init__.py
index d43b48fa2..b98fef006 100644
--- a/nova/openstack/common/rpc/__init__.py
+++ b/nova/openstack/common/rpc/__init__.py
@@ -25,8 +25,16 @@ For some wrappers that add message versioning to rpc, see:
rpc.proxy
"""
+import inspect
+import logging
+
from nova.openstack.common import cfg
+from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
+from nova.openstack.common import local
+
+
+LOG = logging.getLogger(__name__)
rpc_opts = [
@@ -62,7 +70,8 @@ rpc_opts = [
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
]
-cfg.CONF.register_opts(rpc_opts)
+CONF = cfg.CONF
+CONF.register_opts(rpc_opts)
def set_defaults(control_exchange):
@@ -83,10 +92,27 @@ def create_connection(new=True):
:returns: An instance of openstack.common.rpc.common.Connection
"""
- return _get_impl().create_connection(cfg.CONF, new=new)
+ return _get_impl().create_connection(CONF, new=new)
+
+
+def _check_for_lock():
+ if not CONF.debug:
+ return None
+
+ if ((hasattr(local.strong_store, 'locks_held')
+ and local.strong_store.locks_held)):
+ stack = ' :: '.join([frame[3] for frame in inspect.stack()])
+        LOG.warn(_('An RPC is being made while holding a lock. The locks '
+ 'currently held are %(locks)s. This is probably a bug. '
+ 'Please report it. Include the following: [%(stack)s].'),
+ {'locks': local.strong_store.locks_held,
+ 'stack': stack})
+ return True
+
+ return False
-def call(context, topic, msg, timeout=None):
+def call(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
@@ -100,13 +126,17 @@ def call(context, topic, msg, timeout=None):
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
+    :param check_for_lock: if True, a warning is emitted if an RPC call is made
+ with a lock held.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
- return _get_impl().call(cfg.CONF, context, topic, msg, timeout)
+ if check_for_lock:
+ _check_for_lock()
+ return _get_impl().call(CONF, context, topic, msg, timeout)
def cast(context, topic, msg):
@@ -124,7 +154,7 @@ def cast(context, topic, msg):
:returns: None
"""
- return _get_impl().cast(cfg.CONF, context, topic, msg)
+ return _get_impl().cast(CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
@@ -145,10 +175,10 @@ def fanout_cast(context, topic, msg):
:returns: None
"""
- return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)
+ return _get_impl().fanout_cast(CONF, context, topic, msg)
-def multicall(context, topic, msg, timeout=None):
+def multicall(context, topic, msg, timeout=None, check_for_lock=False):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
@@ -166,6 +196,8 @@ def multicall(context, topic, msg, timeout=None):
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
+    :param check_for_lock: if True, a warning is emitted if an RPC call is made
+ with a lock held.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
@@ -175,7 +207,9 @@ def multicall(context, topic, msg, timeout=None):
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
- return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
+ if check_for_lock:
+ _check_for_lock()
+ return _get_impl().multicall(CONF, context, topic, msg, timeout)
def notify(context, topic, msg, envelope=False):
@@ -217,7 +251,7 @@ def cast_to_server(context, server_params, topic, msg):
:returns: None
"""
- return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
+ return _get_impl().cast_to_server(CONF, context, server_params, topic,
msg)
@@ -233,7 +267,7 @@ def fanout_cast_to_server(context, server_params, topic, msg):
:returns: None
"""
- return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
+ return _get_impl().fanout_cast_to_server(CONF, context, server_params,
topic, msg)
@@ -263,10 +297,10 @@ def _get_impl():
global _RPCIMPL
if _RPCIMPL is None:
try:
- _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
+ _RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
- impl = cfg.CONF.rpc_backend.replace('nova.rpc',
- 'nova.openstack.common.rpc')
+ impl = CONF.rpc_backend.replace('nova.rpc',
+ 'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
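A note on the new check_for_lock plumbing above: it is purely a debug aid. When CONF.debug is set and lockutils has recorded a held lock in local.strong_store.locks_held, any call() or multicall() invoked with check_for_lock=True logs the lock names plus the current stack before proceeding. A minimal sketch of a caller (illustrative only; 'compute.host1' and the 'ping' method are made-up names, and ctxt stands for any request context):

    from nova.openstack.common import rpc

    def ping_host(ctxt):
        # With CONF.debug=True and a lock registered by lockutils in
        # local.strong_store.locks_held, this logs the "RPC call while
        # holding a lock" warning and then dispatches as usual.
        return rpc.call(ctxt, 'compute.host1',
                        {'method': 'ping', 'args': {}},
                        check_for_lock=True)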
diff --git a/nova/openstack/common/rpc/amqp.py b/nova/openstack/common/rpc/amqp.py
index 105e6fcbe..9aadce733 100644
--- a/nova/openstack/common/rpc/amqp.py
+++ b/nova/openstack/common/rpc/amqp.py
@@ -368,7 +368,7 @@ def multicall(conf, context, topic, msg, timeout, connection_pool):
conn = ConnectionContext(conf, connection_pool)
wait_msg = MulticallWaiter(conf, conn, timeout)
conn.declare_direct_consumer(msg_id, wait_msg)
- conn.topic_send(topic, rpc_common.serialize_msg(msg))
+ conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
return wait_msg
diff --git a/nova/openstack/common/rpc/common.py b/nova/openstack/common/rpc/common.py
index bf4f5a3de..d2f5a7b8f 100644
--- a/nova/openstack/common/rpc/common.py
+++ b/nova/openstack/common/rpc/common.py
@@ -289,7 +289,7 @@ def deserialize_remote_exception(conf, data):
# NOTE(ameade): We DO NOT want to allow just any module to be imported, in
# order to prevent arbitrary code execution.
- if not module in conf.allowed_rpc_exception_modules:
+ if module not in conf.allowed_rpc_exception_modules:
return RemoteError(name, failure.get('message'), trace)
try:
diff --git a/nova/openstack/common/rpc/impl_fake.py b/nova/openstack/common/rpc/impl_fake.py
index 4d133a1af..f43dbfe91 100644
--- a/nova/openstack/common/rpc/impl_fake.py
+++ b/nova/openstack/common/rpc/impl_fake.py
@@ -167,7 +167,7 @@ def cast(conf, context, topic, msg):
pass
-def notify(conf, context, topic, msg):
+def notify(conf, context, topic, msg, envelope):
check_serialize(msg)
diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py
index 305dc7877..4b5550d6f 100644
--- a/nova/openstack/common/rpc/impl_kombu.py
+++ b/nova/openstack/common/rpc/impl_kombu.py
@@ -66,7 +66,8 @@ kombu_opts = [
help='the RabbitMQ userid'),
cfg.StrOpt('rabbit_password',
default='guest',
- help='the RabbitMQ password'),
+ help='the RabbitMQ password',
+ secret=True),
cfg.StrOpt('rabbit_virtual_host',
default='/',
help='the RabbitMQ virtual host'),
@@ -175,7 +176,7 @@ class ConsumerBase(object):
try:
self.queue.cancel(self.tag)
except KeyError, e:
- # NOTE(comstud): Kludge to get around a amqplib bug
+ # NOTE(comstud): Kludge to get around an amqplib bug
if str(e) != "u'%s'" % self.tag:
raise
self.queue = None
@@ -302,9 +303,15 @@ class Publisher(object):
channel=channel,
routing_key=self.routing_key)
- def send(self, msg):
+ def send(self, msg, timeout=None):
"""Send a message"""
- self.producer.publish(msg)
+ if timeout:
+ #
+ # AMQP TTL is in milliseconds when set in the header.
+ #
+ self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
+ else:
+ self.producer.publish(msg)
class DirectPublisher(Publisher):
@@ -653,7 +660,7 @@ class Connection(object):
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
- def publisher_send(self, cls, topic, msg, **kwargs):
+ def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
"""Send to a publisher based on the publisher class"""
def _error_callback(exc):
@@ -663,7 +670,7 @@ class Connection(object):
def _publish():
publisher = cls(self.conf, self.channel, topic, **kwargs)
- publisher.send(msg)
+ publisher.send(msg, timeout)
self.ensure(_error_callback, _publish)
@@ -691,9 +698,9 @@ class Connection(object):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)
- def topic_send(self, topic, msg):
+ def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message"""
- self.publisher_send(TopicPublisher, topic, msg)
+ self.publisher_send(TopicPublisher, topic, msg, timeout)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
@@ -701,7 +708,7 @@ class Connection(object):
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic"""
- self.publisher_send(NotifyPublisher, topic, msg, **kwargs)
+ self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
def consume(self, limit=None):
"""Consume from all queues/consumers"""
diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py
index 2e05f02f1..544d33790 100644
--- a/nova/openstack/common/rpc/impl_qpid.py
+++ b/nova/openstack/common/rpc/impl_qpid.py
@@ -22,16 +22,18 @@ import uuid
import eventlet
import greenlet
-import qpid.messaging
-import qpid.messaging.exceptions
from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
+from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import amqp as rpc_amqp
from nova.openstack.common.rpc import common as rpc_common
+qpid_messaging = importutils.try_import("qpid.messaging")
+qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
+
LOG = logging.getLogger(__name__)
qpid_opts = [
@@ -49,7 +51,8 @@ qpid_opts = [
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
- help='Password for qpid connection'),
+ help='Password for qpid connection',
+ secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
@@ -275,6 +278,9 @@ class Connection(object):
pool = None
def __init__(self, conf, server_params=None):
+ if not qpid_messaging:
+ raise ImportError("Failed to import qpid.messaging")
+
self.session = None
self.consumers = {}
self.consumer_thread = None
@@ -303,7 +309,7 @@ class Connection(object):
def connection_create(self, broker):
# Create the connection - this does not open the connection
- self.connection = qpid.messaging.Connection(broker)
+ self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
@@ -328,7 +334,7 @@ class Connection(object):
if self.connection.opened():
try:
self.connection.close()
- except qpid.messaging.exceptions.ConnectionError:
+ except qpid_exceptions.ConnectionError:
pass
attempt = 0
@@ -340,7 +346,7 @@ class Connection(object):
try:
self.connection_create(broker)
self.connection.open()
- except qpid.messaging.exceptions.ConnectionError, e:
+ except qpid_exceptions.ConnectionError, e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
@@ -367,8 +373,8 @@ class Connection(object):
while True:
try:
return method(*args, **kwargs)
- except (qpid.messaging.exceptions.Empty,
- qpid.messaging.exceptions.ConnectionError), e:
+ except (qpid_exceptions.Empty,
+ qpid_exceptions.ConnectionError), e:
if error_callback:
error_callback(e)
self.reconnect()
@@ -408,7 +414,7 @@ class Connection(object):
"""Return an iterator that will consume from all queues/consumers"""
def _error_callback(exc):
- if isinstance(exc, qpid.messaging.exceptions.Empty):
+ if isinstance(exc, qpid_exceptions.Empty):
LOG.exception(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
@@ -481,9 +487,20 @@ class Connection(object):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)
- def topic_send(self, topic, msg):
+ def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message"""
- self.publisher_send(TopicPublisher, topic, msg)
+ #
+ # We want to create a message with attributes, e.g. a TTL. We
+ # don't really need to keep 'msg' in its JSON format any longer
+ # so let's create an actual qpid message here and get some
+ # value-add on the go.
+ #
+ # WARNING: Request timeout happens to be in the same units as
+ # qpid's TTL (seconds). If this changes in the future, then this
+ # will need to be altered accordingly.
+ #
+ qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
+ self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py
index d99d390f2..2c0631548 100644
--- a/nova/openstack/common/rpc/impl_zmq.py
+++ b/nova/openstack/common/rpc/impl_zmq.py
@@ -14,23 +14,24 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
import pprint
import socket
-import string
import sys
import types
import uuid
import eventlet
-from eventlet.green import zmq
import greenlet
from nova.openstack.common import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
+from nova.openstack.common import processutils as utils
from nova.openstack.common.rpc import common as rpc_common
+zmq = importutils.try_import('eventlet.green.zmq')
# for convenience, are not modified.
pformat = pprint.pformat
@@ -61,6 +62,10 @@ zmq_opts = [
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
+ cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
+ help='Maximum number of ingress messages to locally buffer '
+ 'per topic. Default is unlimited.'),
+
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
@@ -70,9 +75,9 @@ zmq_opts = [
]
-# These globals are defined in register_opts(conf),
-# a mandatory initialization call
-CONF = None
+CONF = cfg.CONF
+CONF.register_opts(zmq_opts)
+
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
@@ -84,7 +89,7 @@ def _serialize(data):
Error if a developer passes us bad data.
"""
try:
- return str(jsonutils.dumps(data, ensure_ascii=True))
+ return jsonutils.dumps(data, ensure_ascii=True)
except TypeError:
LOG.error(_("JSON serialization failed."))
raise
@@ -107,7 +112,7 @@ class ZmqSocket(object):
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
- self.sock = ZMQ_CTX.socket(zmq_type)
+ self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
@@ -181,11 +186,15 @@ class ZmqSocket(object):
pass
self.subscriptions = []
- # Linger -1 prevents lost/dropped messages
try:
- self.sock.close(linger=-1)
+ # Default is to linger
+ self.sock.close()
except Exception:
- pass
+            # While this is a bad thing to happen, it would be much
+            # worse if some of the code calling this were to fail.
+            # For now, let's log and later evaluate if we can safely
+            # raise here.
+            LOG.error(_("ZeroMQ socket could not be closed."))
self.sock = None
def recv(self):
@@ -202,14 +211,17 @@ class ZmqSocket(object):
class ZmqClient(object):
"""Client for ZMQ sockets."""
- def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
+ def __init__(self, addr, socket_type=None, bind=False):
+ if socket_type is None:
+ socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
+ msg_id = msg_id or 0
+
if serialize:
data = rpc_common.serialize_msg(data, force_envelope)
- self.outq.send([str(msg_id), str(topic), str('cast'),
- _serialize(data)])
+ self.outq.send(map(bytes, (msg_id, topic, 'cast', _serialize(data))))
def close(self):
self.outq.close()
@@ -283,13 +295,13 @@ class InternalContext(object):
ctx.replies)
LOG.debug(_("Sending reply"))
- cast(CONF, ctx, topic, {
+ _multi_send(_cast, ctx, topic, {
'method': '-process_reply',
'args': {
- 'msg_id': msg_id,
+ 'msg_id': msg_id, # Include for Folsom compat.
'response': response
}
- })
+ }, _msg_id=msg_id)
class ConsumerBase(object):
@@ -309,21 +321,22 @@ class ConsumerBase(object):
return [result]
def process(self, style, target, proxy, ctx, data):
+ data.setdefault('version', None)
+ data.setdefault('args', {})
+
# Methods starting with - are
# processed internally (not a valid method name).
- method = data['method']
+ method = data.get('method')
+ if not method:
+ LOG.error(_("RPC message did not include method."))
+ return
# Internal method
# uses internal context for safety.
- if data['method'][0] == '-':
- # For reply / process_reply
- method = method[1:]
- if method == 'reply':
- self.private_ctx.reply(ctx, proxy, **data['args'])
+ if method == '-reply':
+ self.private_ctx.reply(ctx, proxy, **data['args'])
return
- data.setdefault('version', None)
- data.setdefault('args', {})
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
@@ -413,12 +426,6 @@ class ZmqProxy(ZmqBaseReactor):
super(ZmqProxy, self).__init__(conf)
self.topic_proxy = {}
- ipc_dir = CONF.rpc_zmq_ipc_dir
-
- self.topic_proxy['zmq_replies'] = \
- ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ),
- zmq.PUB, bind=True)
- self.sockets.append(self.topic_proxy['zmq_replies'])
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
@@ -430,34 +437,87 @@ class ZmqProxy(ZmqBaseReactor):
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
- # Handle zmq_replies magic
- if topic.startswith('fanout~'):
+ if topic.startswith('fanout~') or topic.startswith('zmq_replies'):
sock_type = zmq.PUB
- elif topic.startswith('zmq_replies'):
- sock_type = zmq.PUB
- inside = rpc_common.deserialize_msg(_deserialize(in_msg))
- msg_id = inside[-1]['args']['msg_id']
- response = inside[-1]['args']['response']
- LOG.debug(_("->response->%s"), response)
- data = [str(msg_id), _serialize(response)]
else:
sock_type = zmq.PUSH
- if not topic in self.topic_proxy:
- outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
- sock_type, bind=True)
- self.topic_proxy[topic] = outq
- self.sockets.append(outq)
- LOG.info(_("Created topic proxy: %s"), topic)
+ if topic not in self.topic_proxy:
+ def publisher(waiter):
+ LOG.info(_("Creating proxy for topic: %s"), topic)
+
+ try:
+ out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
+ (ipc_dir, topic),
+ sock_type, bind=True)
+ except RPCException:
+ waiter.send_exception(*sys.exc_info())
+ return
+
+ self.topic_proxy[topic] = eventlet.queue.LightQueue(
+ CONF.rpc_zmq_topic_backlog)
+ self.sockets.append(out_sock)
+
+ # It takes some time for a pub socket to open,
+ # before we can have any faith in doing a send() to it.
+ if sock_type == zmq.PUB:
+ eventlet.sleep(.5)
+
+ waiter.send(True)
+
+                while True:
+ data = self.topic_proxy[topic].get()
+ out_sock.send(data)
+ LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
+ {'data': data})
+
+ wait_sock_creation = eventlet.event.Event()
+ eventlet.spawn(publisher, wait_sock_creation)
+
+ try:
+ wait_sock_creation.wait()
+ except RPCException:
+ LOG.error(_("Topic socket file creation failed."))
+ return
+
+ try:
+ self.topic_proxy[topic].put_nowait(data)
+ LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
+ {'data': data})
+ except eventlet.queue.Full:
+ LOG.error(_("Local per-topic backlog buffer full for topic "
+ "%(topic)s. Dropping message.") % {'topic': topic})
+
+ def consume_in_thread(self):
+ """Runs the ZmqProxy service"""
+ ipc_dir = CONF.rpc_zmq_ipc_dir
+ consume_in = "tcp://%s:%s" % \
+ (CONF.rpc_zmq_bind_address,
+ CONF.rpc_zmq_port)
+ consumption_proxy = InternalContext(None)
+
+ if not os.path.isdir(ipc_dir):
+ try:
+ utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
+ utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
+ ipc_dir, run_as_root=True)
+ utils.execute('chmod', '750', ipc_dir, run_as_root=True)
+ except utils.ProcessExecutionError:
+ LOG.error(_("Could not create IPC directory %s") %
+ (ipc_dir, ))
+ raise
- # It takes some time for a pub socket to open,
- # before we can have any faith in doing a send() to it.
- if sock_type == zmq.PUB:
- eventlet.sleep(.5)
+ try:
+ self.register(consumption_proxy,
+ consume_in,
+ zmq.PULL,
+ out_bind=True)
+ except zmq.ZMQError:
+ LOG.error(_("Could not create ZeroMQ receiver daemon. "
+ "Socket may already be in use."))
+ raise
- LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data})
- self.topic_proxy[topic].send(data)
- LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data})
+ super(ZmqProxy, self).consume_in_thread()
class ZmqReactor(ZmqBaseReactor):
@@ -533,8 +593,8 @@ class Connection(rpc_common.Connection):
self.reactor.consume_in_thread()
-def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True,
- force_envelope=False):
+def _cast(addr, context, topic, msg, timeout=None, serialize=True,
+ force_envelope=False, _msg_id=None):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
@@ -543,7 +603,7 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True,
conn = ZmqClient(addr)
# assumes cast can't return an exception
- conn.cast(msg_id, topic, payload, serialize, force_envelope)
+ conn.cast(_msg_id, topic, payload, serialize, force_envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
@@ -551,7 +611,8 @@ def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True,
conn.close()
-def _call(addr, context, msg_id, topic, msg, timeout=None):
+def _call(addr, context, topic, msg, timeout=None,
+ serialize=True, force_envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
@@ -586,17 +647,20 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
)
LOG.debug(_("Sending cast"))
- _cast(addr, context, msg_id, topic, payload)
+ _cast(addr, context, topic, payload,
+ serialize=serialize, force_envelope=force_envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
- responses = _deserialize(msg[-1])
+ responses = _deserialize(msg[-1])[-1]['args']['response']
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
+ except (IndexError, KeyError):
+ raise RPCException(_("RPC Message Invalid."))
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
@@ -613,7 +677,7 @@ def _call(addr, context, msg_id, topic, msg, timeout=None):
def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
- force_envelope=False):
+ force_envelope=False, _msg_id=None):
"""
Wraps the sending of messages,
dispatches to the matchmaker and sends
@@ -622,7 +686,7 @@ def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
- queues = matchmaker.queues(topic)
+ queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
@@ -639,10 +703,11 @@ def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
- _topic, _topic, msg, timeout, serialize,
- force_envelope)
+ _topic, msg, timeout, serialize,
+ force_envelope, _msg_id)
return
- return method(_addr, context, _topic, _topic, msg, timeout)
+ return method(_addr, context, _topic, msg, timeout,
+ serialize, force_envelope)
def create_connection(conf, new=True):
@@ -689,44 +754,26 @@ def notify(conf, context, topic, msg, **kwargs):
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
+ if ZMQ_CTX:
+ ZMQ_CTX.term()
+ ZMQ_CTX = None
+
global matchmaker
matchmaker = None
- ZMQ_CTX.term()
- ZMQ_CTX = None
-def register_opts(conf):
- """Registration of options for this driver."""
- #NOTE(ewindisch): ZMQ_CTX and matchmaker
- # are initialized here as this is as good
- # an initialization method as any.
+def _get_ctxt():
+ if not zmq:
+ raise ImportError("Failed to import eventlet.green.zmq")
- # We memoize through these globals
global ZMQ_CTX
- global matchmaker
- global CONF
-
- if not CONF:
- conf.register_opts(zmq_opts)
- CONF = conf
- # Don't re-set, if this method is called twice.
if not ZMQ_CTX:
- ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)
- if not matchmaker:
- # rpc_zmq_matchmaker should be set to a 'module.Class'
- mm_path = conf.rpc_zmq_matchmaker.split('.')
- mm_module = '.'.join(mm_path[:-1])
- mm_class = mm_path[-1]
+ ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
+ return ZMQ_CTX
- # Only initialize a class.
- if mm_path[-1][0] not in string.ascii_uppercase:
- LOG.error(_("Matchmaker could not be loaded.\n"
- "rpc_zmq_matchmaker is not a class."))
- raise RPCException(_("Error loading Matchmaker."))
- mm_impl = importutils.import_module(mm_module)
- mm_constructor = getattr(mm_impl, mm_class)
- matchmaker = mm_constructor()
-
-
-register_opts(cfg.CONF)
+def _get_matchmaker():
+ global matchmaker
+ if not matchmaker:
+ matchmaker = importutils.import_object(CONF.rpc_zmq_matchmaker)
+ return matchmaker
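The proxy rework above replaces direct sends with one green thread per topic draining a bounded eventlet.queue.LightQueue, so a slow PUB socket costs buffered messages instead of blocking the whole reactor; rpc_zmq_topic_backlog caps the buffer and put_nowait() drops on overflow. A toy version of that producer/consumer shape (the queue size of 2 stands in for rpc_zmq_topic_backlog):

    import eventlet
    import eventlet.queue

    backlog = eventlet.queue.LightQueue(2)

    def publisher():
        while True:
            data = backlog.get()        # blocks this green thread only
            print 'relayed', data

    eventlet.spawn(publisher)
    for n in range(5):
        try:
            backlog.put_nowait(n)       # never blocks the caller
        except eventlet.queue.Full:
            print 'dropping', n         # mirrors the LOG.error() path above
    eventlet.sleep(0)                   # yield so the publisher can drain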
diff --git a/nova/openstack/common/setup.py b/nova/openstack/common/setup.py
index e6f72f034..fb187fff4 100644
--- a/nova/openstack/common/setup.py
+++ b/nova/openstack/common/setup.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
+# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,7 +20,7 @@
Utilities with minimum-depends for use in setup.py
"""
-import datetime
+import email
import os
import re
import subprocess
@@ -33,11 +34,12 @@ def parse_mailmap(mailmap='.mailmap'):
if os.path.exists(mailmap):
with open(mailmap, 'r') as fp:
for l in fp:
- l = l.strip()
- if not l.startswith('#') and ' ' in l:
- canonical_email, alias = [x for x in l.split(' ')
- if x.startswith('<')]
- mapping[alias] = canonical_email
+ try:
+ canonical_email, alias = re.match(
+ r'[^#]*?(<.+>).*(<.+>).*', l).groups()
+ except AttributeError:
+ continue
+ mapping[alias] = canonical_email
return mapping
@@ -45,8 +47,8 @@ def canonicalize_emails(changelog, mapping):
"""Takes in a string and an email alias mapping and replaces all
instances of the aliases in the string with their real email.
"""
- for alias, email in mapping.iteritems():
- changelog = changelog.replace(alias, email)
+ for alias, email_address in mapping.iteritems():
+ changelog = changelog.replace(alias, email_address)
return changelog
@@ -106,23 +108,17 @@ def parse_dependency_links(requirements_files=['requirements.txt',
return dependency_links
-def write_requirements():
- venv = os.environ.get('VIRTUAL_ENV', None)
- if venv is not None:
- with open("requirements.txt", "w") as req_file:
- output = subprocess.Popen(["pip", "-E", venv, "freeze", "-l"],
- stdout=subprocess.PIPE)
- requirements = output.communicate()[0].strip()
- req_file.write(requirements)
-
-
-def _run_shell_command(cmd):
+def _run_shell_command(cmd, throw_on_error=False):
if os.name == 'nt':
output = subprocess.Popen(["cmd.exe", "/C", cmd],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
else:
output = subprocess.Popen(["/bin/sh", "-c", cmd],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ if output.returncode and throw_on_error:
+ raise Exception("%s returned %d" % cmd, output.returncode)
out = output.communicate()
if len(out) == 0:
return None
@@ -131,57 +127,6 @@ def _run_shell_command(cmd):
return out[0].strip()
-def _get_git_next_version_suffix(branch_name):
- datestamp = datetime.datetime.now().strftime('%Y%m%d')
- if branch_name == 'milestone-proposed':
- revno_prefix = "r"
- else:
- revno_prefix = ""
- _run_shell_command("git fetch origin +refs/meta/*:refs/remotes/meta/*")
- milestone_cmd = "git show meta/openstack/release:%s" % branch_name
- milestonever = _run_shell_command(milestone_cmd)
- if milestonever:
- first_half = "%s~%s" % (milestonever, datestamp)
- else:
- first_half = datestamp
-
- post_version = _get_git_post_version()
- # post version should look like:
- # 0.1.1.4.gcc9e28a
- # where the bit after the last . is the short sha, and the bit between
- # the last and second to last is the revno count
- (revno, sha) = post_version.split(".")[-2:]
- second_half = "%s%s.%s" % (revno_prefix, revno, sha)
- return ".".join((first_half, second_half))
-
-
-def _get_git_current_tag():
- return _run_shell_command("git tag --contains HEAD")
-
-
-def _get_git_tag_info():
- return _run_shell_command("git describe --tags")
-
-
-def _get_git_post_version():
- current_tag = _get_git_current_tag()
- if current_tag is not None:
- return current_tag
- else:
- tag_info = _get_git_tag_info()
- if tag_info is None:
- base_version = "0.0"
- cmd = "git --no-pager log --oneline"
- out = _run_shell_command(cmd)
- revno = len(out.split("\n"))
- sha = _run_shell_command("git describe --always")
- else:
- tag_infos = tag_info.split("-")
- base_version = "-".join(tag_infos[:-2])
- (revno, sha) = tag_infos[-2:]
- return "%s.%s.%s" % (base_version, revno, sha)
-
-
def write_git_changelog():
"""Write a changelog based on the git changelog."""
new_changelog = 'ChangeLog'
@@ -227,26 +172,6 @@ _rst_template = """%(heading)s
"""
-def read_versioninfo(project):
- """Read the versioninfo file. If it doesn't exist, we're in a github
- zipball, and there's really no way to know what version we really
- are, but that should be ok, because the utility of that should be
- just about nil if this code path is in use in the first place."""
- versioninfo_path = os.path.join(project, 'versioninfo')
- if os.path.exists(versioninfo_path):
- with open(versioninfo_path, 'r') as vinfo:
- version = vinfo.read().strip()
- else:
- version = "0.0.0"
- return version
-
-
-def write_versioninfo(project, version):
- """Write a simple file containing the version of the package."""
- with open(os.path.join(project, 'versioninfo'), 'w') as fil:
- fil.write("%s\n" % version)
-
-
def get_cmdclass():
"""Return dict of commands to run from setup.py."""
@@ -276,6 +201,9 @@ def get_cmdclass():
from sphinx.setup_command import BuildDoc
class LocalBuildDoc(BuildDoc):
+
+ builders = ['html', 'man']
+
def generate_autoindex(self):
print "**Autodocumenting from %s" % os.path.abspath(os.curdir)
modules = {}
@@ -311,56 +239,97 @@ def get_cmdclass():
if not os.getenv('SPHINX_DEBUG'):
self.generate_autoindex()
- for builder in ['html', 'man']:
+ for builder in self.builders:
self.builder = builder
self.finalize_options()
self.project = self.distribution.get_name()
self.version = self.distribution.get_version()
self.release = self.distribution.get_version()
BuildDoc.run(self)
+
+ class LocalBuildLatex(LocalBuildDoc):
+ builders = ['latex']
+
cmdclass['build_sphinx'] = LocalBuildDoc
+ cmdclass['build_sphinx_latex'] = LocalBuildLatex
except ImportError:
pass
return cmdclass
-def get_git_branchname():
- for branch in _run_shell_command("git branch --color=never").split("\n"):
- if branch.startswith('*'):
- _branch_name = branch.split()[1].strip()
- if _branch_name == "(no":
- _branch_name = "no-branch"
- return _branch_name
+def _get_revno():
+ """Return the number of commits since the most recent tag.
+ We use git-describe to find this out, but if there are no
+ tags then we fall back to counting commits since the beginning
+ of time.
+ """
+ describe = _run_shell_command("git describe --always")
+ if "-" in describe:
+ return describe.rsplit("-", 2)[-2]
-def get_pre_version(projectname, base_version):
- """Return a version which is leading up to a version that will
- be released in the future."""
- if os.path.isdir('.git'):
- current_tag = _get_git_current_tag()
- if current_tag is not None:
- version = current_tag
- else:
- branch_name = os.getenv('BRANCHNAME',
- os.getenv('GERRIT_REFNAME',
- get_git_branchname()))
- version_suffix = _get_git_next_version_suffix(branch_name)
- version = "%s~%s" % (base_version, version_suffix)
- write_versioninfo(projectname, version)
- return version
- else:
- version = read_versioninfo(projectname)
- return version
+ # no tags found
+ revlist = _run_shell_command("git rev-list --abbrev-commit HEAD")
+ return len(revlist.splitlines())
-def get_post_version(projectname):
+def get_version_from_git(pre_version):
"""Return a version which is equal to the tag that's on the current
revision if there is one, or tag plus number of additional revisions
if the current revision has no tag."""
if os.path.isdir('.git'):
- version = _get_git_post_version()
- write_versioninfo(projectname, version)
+ if pre_version:
+ try:
+ return _run_shell_command(
+ "git describe --exact-match",
+ throw_on_error=True).replace('-', '.')
+ except Exception:
+ sha = _run_shell_command("git log -n1 --pretty=format:%h")
+ return "%s.a%s.g%s" % (pre_version, _get_revno(), sha)
+ else:
+ return _run_shell_command(
+ "git describe --always").replace('-', '.')
+ return None
+
+
+def get_version_from_pkg_info(package_name):
+ """Get the version from PKG-INFO file if we can."""
+ try:
+ pkg_info_file = open('PKG-INFO', 'r')
+ except (IOError, OSError):
+ return None
+ try:
+ pkg_info = email.message_from_file(pkg_info_file)
+ except email.MessageError:
+ return None
+ # Check to make sure we're in our own dir
+ if pkg_info.get('Name', None) != package_name:
+ return None
+ return pkg_info.get('Version', None)
+
+
+def get_version(package_name, pre_version=None):
+ """Get the version of the project. First, try getting it from PKG-INFO, if
+ it exists. If it does, that means we're in a distribution tarball or that
+ install has happened. Otherwise, if there is no PKG-INFO file, pull the
+ version from git.
+
+ We do not support setup.py version sanity in git archive tarballs, nor do
+ we support packagers directly sucking our git repo into theirs. We expect
+ that a source tarball be made from our git repo - or that if someone wants
+ to make a source tarball from a fork of our repo with additional tags in it
+ that they understand and desire the results of doing that.
+ """
+ version = os.environ.get("OSLO_PACKAGE_VERSION", None)
+ if version:
+ return version
+ version = get_version_from_pkg_info(package_name)
+ if version:
+ return version
+ version = get_version_from_git(pre_version)
+ if version:
return version
- return read_versioninfo(projectname)
+ raise Exception("Versioning for this project requires either an sdist"
+ " tarball, or access to an upstream git repository.")
diff --git a/nova/openstack/common/version.py b/nova/openstack/common/version.py
new file mode 100644
index 000000000..b16ef4bca
--- /dev/null
+++ b/nova/openstack/common/version.py
@@ -0,0 +1,86 @@
+
+# Copyright 2012 OpenStack LLC
+# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utilities for consuming the version from pkg_resources.
+"""
+
+import pkg_resources
+
+
+class VersionInfo(object):
+
+ def __init__(self, package):
+ """Object that understands versioning for a package
+ :param package: name of the python package, such as glance, or
+ python-glanceclient
+ """
+ self.package = package
+ self.release = None
+ self.version = None
+ self._cached_version = None
+
+ def _get_version_from_pkg_resources(self):
+ """Get the version of the package from the pkg_resources record
+ associated with the package."""
+ try:
+ requirement = pkg_resources.Requirement.parse(self.package)
+ provider = pkg_resources.get_provider(requirement)
+ return provider.version
+ except pkg_resources.DistributionNotFound:
+            # The most likely cause for this is running tests in a tree
+ # produced from a tarball where the package itself has not been
+ # installed into anything. Check for a PKG-INFO file.
+ from nova.openstack.common import setup
+ return setup.get_version_from_pkg_info(self.package)
+
+ def release_string(self):
+ """Return the full version of the package including suffixes indicating
+ VCS status.
+ """
+ if self.release is None:
+ self.release = self._get_version_from_pkg_resources()
+
+ return self.release
+
+ def version_string(self):
+ """Return the short version minus any alpha/beta tags."""
+ if self.version is None:
+ parts = []
+ for part in self.release_string().split('.'):
+ if part[0].isdigit():
+ parts.append(part)
+ else:
+ break
+ self.version = ".".join(parts)
+
+ return self.version
+
+ # Compatibility functions
+ canonical_version_string = version_string
+ version_string_with_vcs = release_string
+
+ def cached_version_string(self, prefix=""):
+ """Generate an object which will expand in a string context to
+ the results of version_string(). We do this so that don't
+ call into pkg_resources every time we start up a program when
+ passing version information into the CONF constructor, but
+ rather only do the calculation when and if a version is requested
+ """
+ if not self._cached_version:
+ self._cached_version = "%s%s" % (prefix,
+ self.version_string())
+ return self._cached_version
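Typical use of the new VersionInfo object looks like the sketch below; the printed strings are illustrative, since the exact values depend on how the package was built and installed:

    from nova.openstack.common import version

    version_info = version.VersionInfo('nova')
    print version_info.release_string()    # e.g. '2013.1.a42.gdeadbee'
    print version_info.version_string()    # e.g. '2013.1' (suffixes trimmed)
    # cached_version_string() memoizes, so passing the version into the
    # CONF constructor does not hit pkg_resources on every start-up.
    print version_info.cached_version_string(prefix='nova-')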
diff --git a/nova/policy.py b/nova/policy.py
index 27e261eac..ac2f2e730 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -101,14 +101,15 @@ def enforce(context, action, target, do_raise=True):
return policy.check(action, target, credentials, **extra)
-def check_is_admin(roles):
+def check_is_admin(context):
"""Whether or not roles contains 'admin' role according to policy setting.
"""
init()
- target = {}
- credentials = {'roles': roles}
+ # the target is the requesting user itself
+ credentials = context.to_dict()
+ target = credentials
return policy.check('context_is_admin', target, credentials)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 16714a5ff..4ad548798 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -27,6 +27,7 @@ from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova.conductor import api as conductor_api
from nova import db
from nova import exception
from nova import notifications
@@ -66,6 +67,7 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
notifications.send_update(context, old_ref, new_ref,
service="scheduler")
compute_utils.add_instance_fault_from_exc(context,
+ conductor_api.LocalAPI(),
new_ref, ex, sys.exc_info())
properties = request_spec.get('instance_properties', {})
@@ -80,13 +82,16 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
'scheduler.run_instance', notifier.ERROR, payload)
-def instance_update_db(context, instance_uuid):
+def instance_update_db(context, instance_uuid, extra_values=None):
'''Clear the host and node, and set the scheduled_at field of an Instance.
:returns: An Instance with the updated fields set properly.
'''
now = timeutils.utcnow()
values = {'host': None, 'node': None, 'scheduled_at': now}
+ if extra_values:
+ values.update(extra_values)
+
return db.instance_update(context, instance_uuid, values)
@@ -130,6 +135,16 @@ class Scheduler(object):
for service in services
if self.servicegroup_api.service_is_up(service)]
+ def group_hosts(self, context, group):
+ """Return the list of hosts that have VM's from the group."""
+
+ # The system_metadata 'group' will be filtered
+ members = db.instance_get_all_by_filters(context,
+ {'deleted': False, 'group': group})
+ return [member['host']
+ for member in members
+ if member.get('host') is not None]
+
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations):
@@ -261,16 +276,9 @@ class Scheduler(object):
"""
# Getting total available memory of host
- avail = self._get_compute_info(context, dest)['memory_mb']
-
- # Getting total used memory and disk of host
- # It should be sum of memories that are assigned as max value,
- # because overcommitting is risky.
- instance_refs = db.instance_get_all_by_host(context, dest)
- used = sum([i['memory_mb'] for i in instance_refs])
+ avail = self._get_compute_info(context, dest)['free_ram_mb']
mem_inst = instance_ref['memory_mb']
- avail = avail - used
if not mem_inst or avail <= mem_inst:
instance_uuid = instance_ref['uuid']
reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index 9384e1495..05ca62923 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -133,8 +133,17 @@ class FilterScheduler(driver.Scheduler):
'scheduler.run_instance.scheduled', notifier.INFO,
payload)
+ # Update the metadata if necessary
+ scheduler_hints = filter_properties.get('scheduler_hints') or {}
+ group = scheduler_hints.get('group', None)
+ values = None
+ if group:
+ values = request_spec['instance_properties']['system_metadata']
+ values.update({'group': group})
+ values = {'system_metadata': values}
+
updated_instance = driver.instance_update_db(context,
- instance_uuid)
+ instance_uuid, extra_values=values)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
@@ -248,6 +257,18 @@ class FilterScheduler(driver.Scheduler):
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
+ # Get the group
+ update_group_hosts = False
+ scheduler_hints = filter_properties.get('scheduler_hints') or {}
+ group = scheduler_hints.get('group', None)
+ if group:
+ group_hosts = self.group_hosts(elevated, group)
+ update_group_hosts = True
+ if 'group_hosts' not in filter_properties:
+ filter_properties.update({'group_hosts': []})
+ configured_hosts = filter_properties['group_hosts']
+ filter_properties['group_hosts'] = configured_hosts + group_hosts
+
config_options = self._get_configuration_options()
# check retry policy. Rather ugly use of instance_uuids[0]...
@@ -299,4 +320,34 @@ class FilterScheduler(driver.Scheduler):
# Now consume the resources so the filter/weights
# will change for the next instance.
best_host.obj.consume_from_instance(instance_properties)
+ if update_group_hosts is True:
+ filter_properties['group_hosts'].append(best_host.obj.host)
+
return selected_hosts
+
+ def _assert_compute_node_has_enough_memory(self, context,
+ instance_ref, dest):
+ """Checks if destination host has enough memory for live migration.
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+ compute = self._get_compute_info(context, dest)
+ node = compute.get('hypervisor_hostname')
+ host_state = self.host_manager.host_state_cls(dest, node)
+ host_state.update_from_compute_node(compute)
+
+ instance_type = instance_ref['instance_type']
+ filter_properties = {'instance_type': instance_type}
+
+ hosts = self.host_manager.get_filtered_hosts([host_state],
+ filter_properties,
+ 'RamFilter')
+ if not hosts:
+ instance_uuid = instance_ref['uuid']
+ reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
+ "Lack of memory")
+ raise exception.MigrationError(reason=reason % locals())
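Stepping back to the group scheduling added in this file: _schedule() seeds filter_properties['group_hosts'] with the hosts that already run members of the group (via the new group_hosts() query), and after every placement the chosen host is appended, so later instances from the same request steer away from it too. The bookkeeping in outline (a hypothetical standalone form, not the patch's own helpers):

    def seed_group_hosts(filter_properties, hosts_running_group):
        # Merge hosts already used by the group into the filter input.
        configured = filter_properties.get('group_hosts', [])
        filter_properties['group_hosts'] = configured + hosts_running_group

    def record_placement(filter_properties, chosen_host):
        # Remember the pick so the next instance in this request
        # avoids the same host.
        filter_properties['group_hosts'].append(chosen_host)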
diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
index 03bf0dd6e..a7e894320 100644
--- a/nova/scheduler/filters/affinity_filter.py
+++ b/nova/scheduler/filters/affinity_filter.py
@@ -18,19 +18,16 @@
import netaddr
from nova.compute import api as compute
+from nova.openstack.common import log as logging
from nova.scheduler import filters
+LOG = logging.getLogger(__name__)
+
class AffinityFilter(filters.BaseHostFilter):
def __init__(self):
self.compute_api = compute.API()
- def _all_hosts(self, context):
- all_hosts = {}
- for instance in self.compute_api.get_all(context):
- all_hosts[instance['uuid']] = instance['host']
- return all_hosts
-
class DifferentHostFilter(AffinityFilter):
'''Schedule the instance on a different host from a set of instances.'''
@@ -38,15 +35,15 @@ class DifferentHostFilter(AffinityFilter):
def host_passes(self, host_state, filter_properties):
context = filter_properties['context']
scheduler_hints = filter_properties.get('scheduler_hints') or {}
- me = host_state.host
affinity_uuids = scheduler_hints.get('different_host', [])
if isinstance(affinity_uuids, basestring):
affinity_uuids = [affinity_uuids]
if affinity_uuids:
- all_hosts = self._all_hosts(context)
- return not any([i for i in affinity_uuids
- if all_hosts.get(i) == me])
+ return not self.compute_api.get_all(context,
+ {'host': host_state.host,
+ 'uuid': affinity_uuids,
+ 'deleted': False})
# With no different_host key
return True
@@ -59,16 +56,14 @@ class SameHostFilter(AffinityFilter):
def host_passes(self, host_state, filter_properties):
context = filter_properties['context']
scheduler_hints = filter_properties.get('scheduler_hints') or {}
- me = host_state.host
affinity_uuids = scheduler_hints.get('same_host', [])
if isinstance(affinity_uuids, basestring):
affinity_uuids = [affinity_uuids]
if affinity_uuids:
- all_hosts = self._all_hosts(context)
- return any([i for i
- in affinity_uuids
- if all_hosts.get(i) == me])
+ return self.compute_api.get_all(context, {'host': host_state.host,
+ 'uuid': affinity_uuids,
+ 'deleted': False})
# With no same_host key
return True
@@ -88,3 +83,20 @@ class SimpleCIDRAffinityFilter(AffinityFilter):
# We don't have an affinity host address.
return True
+
+
+class GroupAntiAffinityFilter(AffinityFilter):
+ """Schedule the instance on a different host from a set of group
+ instances.
+ """
+
+ def host_passes(self, host_state, filter_properties):
+ group_hosts = filter_properties.get('group_hosts') or []
+ LOG.debug(_("Group affinity: %(host)s in %(configured)s"),
+ {'host': host_state.host,
+ 'configured': group_hosts})
+ if group_hosts:
+            return host_state.host not in group_hosts
+
+ # No groups configured
+ return True
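The filter consuming that list reduces to a small predicate: reject a host already serving the group, and pass everything when no group hint was supplied. Stated on its own (a mirror of host_passes above, not the class itself):

    def group_anti_affinity_passes(host, group_hosts):
        if group_hosts:
            return host not in group_hosts
        # No groups configured for this request.
        return True

    assert not group_anti_affinity_passes('host1', ['host1', 'host2'])
    assert group_anti_affinity_passes('host3', ['host1', 'host2'])
    assert group_anti_affinity_passes('host1', [])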
diff --git a/nova/scheduler/filters/availability_zone_filter.py b/nova/scheduler/filters/availability_zone_filter.py
index 585acbaf8..390276ea3 100644
--- a/nova/scheduler/filters/availability_zone_filter.py
+++ b/nova/scheduler/filters/availability_zone_filter.py
@@ -14,7 +14,6 @@
# under the License.
-from nova import availability_zones
from nova import db
from nova.openstack.common import cfg
from nova.scheduler import filters
diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py
index 2d070ea8e..f77d6dad3 100644
--- a/nova/scheduler/filters/json_filter.py
+++ b/nova/scheduler/filters/json_filter.py
@@ -32,7 +32,7 @@ class JsonFilter(filters.BaseHostFilter):
if len(args) < 2:
return False
if op is operator.contains:
- bad = not args[0] in args[1:]
+ bad = args[0] not in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py
index 302d2b3a8..14f1a37b0 100644
--- a/nova/scheduler/filters/trusted_filter.py
+++ b/nova/scheduler/filters/trusted_filter.py
@@ -269,7 +269,7 @@ class ComputeAttestationCache(object):
def get_host_attestation(self, host):
"""Check host's trust level."""
- if not host in self.compute_nodes:
+ if host not in self.compute_nodes:
self._init_cache_entry(host)
if not self._cache_valid(host):
self._update_cache()
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index b472220bd..8d17349fa 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -328,10 +328,13 @@ class HostManager(object):
name_to_cls_map = dict([(x.host, x) for x in hosts])
if ignore_hosts:
_strip_ignore_hosts(name_to_cls_map, ignore_hosts)
+ if not name_to_cls_map:
+ return []
if force_hosts:
_match_forced_hosts(name_to_cls_map, force_hosts)
- if not name_to_cls_map:
- return []
+ # NOTE(vish): Skip filters on forced hosts.
+ if name_to_cls_map:
+ return name_to_cls_map.values()
hosts = name_to_cls_map.itervalues()
return self.filter_handler.get_filtered_objects(filter_classes,
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 23e64cd7c..a129a1b6d 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -26,6 +26,7 @@ import sys
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova.conductor import api as conductor_api
import nova.context
from nova import db
from nova import exception
@@ -103,22 +104,26 @@ class SchedulerManager(manager.Manager):
"""Tries to call schedule_run_instance on the driver.
Sets instance vm_state to ERROR on exceptions
"""
- try:
- return self.driver.schedule_run_instance(context,
- request_spec, admin_password, injected_files,
- requested_networks, is_first_time, filter_properties)
- except exception.NoValidHost as ex:
- # don't re-raise
- self._set_vm_state_and_notify('run_instance',
- {'vm_state': vm_states.ERROR,
- 'task_state': None},
- context, ex, request_spec)
- except Exception as ex:
- with excutils.save_and_reraise_exception():
+ instance_uuids = request_spec['instance_uuids']
+ with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
+ 'schedule', *instance_uuids):
+ try:
+ return self.driver.schedule_run_instance(context,
+ request_spec, admin_password, injected_files,
+ requested_networks, is_first_time, filter_properties)
+
+ except exception.NoValidHost as ex:
+ # don't re-raise
self._set_vm_state_and_notify('run_instance',
- {'vm_state': vm_states.ERROR,
+ {'vm_state': vm_states.ERROR,
'task_state': None},
- context, ex, request_spec)
+ context, ex, request_spec)
+ except Exception as ex:
+ with excutils.save_and_reraise_exception():
+ self._set_vm_state_and_notify('run_instance',
+ {'vm_state': vm_states.ERROR,
+ 'task_state': None},
+ context, ex, request_spec)
def prep_resize(self, context, image, request_spec, filter_properties,
instance, instance_type, reservations):
@@ -126,32 +131,35 @@ class SchedulerManager(manager.Manager):
Sets instance vm_state to ACTIVE on NoHostFound
Sets vm_state to ERROR on other exceptions
"""
- try:
- kwargs = {
- 'context': context,
- 'image': image,
- 'request_spec': request_spec,
- 'filter_properties': filter_properties,
- 'instance': instance,
- 'instance_type': instance_type,
- 'reservations': reservations,
- }
- return self.driver.schedule_prep_resize(**kwargs)
- except exception.NoValidHost as ex:
- self._set_vm_state_and_notify('prep_resize',
- {'vm_state': vm_states.ACTIVE,
- 'task_state': None},
- context, ex, request_spec)
- if reservations:
- QUOTAS.rollback(context, reservations)
- except Exception as ex:
- with excutils.save_and_reraise_exception():
+ instance_uuid = instance['uuid']
+ with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
+ 'schedule', instance_uuid):
+ try:
+ kwargs = {
+ 'context': context,
+ 'image': image,
+ 'request_spec': request_spec,
+ 'filter_properties': filter_properties,
+ 'instance': instance,
+ 'instance_type': instance_type,
+ 'reservations': reservations,
+ }
+ return self.driver.schedule_prep_resize(**kwargs)
+ except exception.NoValidHost as ex:
self._set_vm_state_and_notify('prep_resize',
- {'vm_state': vm_states.ERROR,
+ {'vm_state': vm_states.ACTIVE,
'task_state': None},
context, ex, request_spec)
if reservations:
QUOTAS.rollback(context, reservations)
+ except Exception as ex:
+ with excutils.save_and_reraise_exception():
+ self._set_vm_state_and_notify('prep_resize',
+ {'vm_state': vm_states.ERROR,
+ 'task_state': None},
+ context, ex, request_spec)
+ if reservations:
+ QUOTAS.rollback(context, reservations)
def _set_vm_state_and_notify(self, method, updates, context, ex,
request_spec):
@@ -190,6 +198,7 @@ class SchedulerManager(manager.Manager):
notifications.send_update(context, old_ref, new_ref,
service="scheduler")
compute_utils.add_instance_fault_from_exc(context,
+ conductor_api.LocalAPI(),
new_ref, ex, sys.exc_info())
payload = dict(request_spec=request_spec,
diff --git a/nova/service.py b/nova/service.py
index 4fc12f374..3d556a202 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -38,7 +38,6 @@ from nova.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
-from nova.openstack.common.rpc import common as rpc_common
from nova import servicegroup
from nova import utils
from nova import version
@@ -622,7 +621,7 @@ class WSGIService(object):
"""
fl = '%s_manager' % self.name
- if not fl in CONF:
+ if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index 0fb30cdf5..793d4bfc9 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -40,7 +40,8 @@ class API(object):
_driver = None
_driver_name_class_mapping = {
- 'db': 'nova.servicegroup.drivers.db.DbDriver'
+ 'db': 'nova.servicegroup.drivers.db.DbDriver',
+ 'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver'
}
@lockutils.synchronized('nova.servicegroup.api.new', 'nova-')
diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py
index 686ee728b..18b4b74e5 100644
--- a/nova/servicegroup/drivers/db.py
+++ b/nova/servicegroup/drivers/db.py
@@ -16,7 +16,6 @@
from nova import conductor
from nova import context
-from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/servicegroup/drivers/zk.py b/nova/servicegroup/drivers/zk.py
new file mode 100644
index 000000000..c4e3f7b71
--- /dev/null
+++ b/nova/servicegroup/drivers/zk.py
@@ -0,0 +1,157 @@
+# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
+#
+# Copyright (c) IBM 2012 Pavel Kravchenco <kpavel at il dot ibm dot com>
+# Alexey Roytman <roytman at il dot ibm dot com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import eventlet
+import evzookeeper
+from evzookeeper import membership
+import zookeeper
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.servicegroup import api
+from nova import utils
+
+
+zk_driver_opts = [
+ cfg.StrOpt('address',
+ default=None,
+ help='The ZooKeeper addresses for servicegroup service in the '
+ 'format of host1:port,host2:port,host3:port'),
+ cfg.IntOpt('recv_timeout',
+ default=4000,
+ help='recv_timeout parameter for the zk session'),
+ cfg.StrOpt('sg_prefix',
+ default="/servicegroups",
+ help='The prefix used in ZooKeeper to store ephemeral nodes'),
+ cfg.IntOpt('sg_retry_interval',
+ default=5,
+ help='Number of seconds to wait until retrying to join the '
+ 'session'),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(zk_driver_opts, group="zk")
+
+LOG = logging.getLogger(__name__)
+
+
+class ZooKeeperDriver(api.ServiceGroupDriver):
+ """ZooKeeper driver for the service group API."""
+
+ def __init__(self, *args, **kwargs):
+ """Create the zk session object."""
+ null = open(os.devnull, "w")
+        self._session = evzookeeper.ZKSession(CONF.zk.address,
+                                              recv_timeout=CONF.zk.recv_timeout,
+                                              zklog_fd=null)
+ self._memberships = {}
+ self._monitors = {}
+ # Make sure the prefix exists
+ try:
+ self._session.create(CONF.zk.sg_prefix, "",
+ acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE])
+ except zookeeper.NodeExistsException:
+ pass
+
+ super(ZooKeeperDriver, self).__init__()
+
+ def join(self, member_id, group, service=None):
+ """Join the given service with its group."""
+ LOG.debug(_('ZooKeeperDriver: join new member %(id)s to the '
+ '%(gr)s group, service=%(sr)s'),
+ {'id': member_id, 'gr': group, 'sr': service})
+ member = self._memberships.get((group, member_id), None)
+ if member is None:
+            # First time joining; create a new membership object.
+ path = "%s/%s" % (CONF.zk.sg_prefix, group)
+ try:
+ member = membership.Membership(self._session, path, member_id)
+ except RuntimeError:
+ LOG.exception(_("Unable to join. It is possible that either "
+ "another node exists with the same name, or "
+ "this node just restarted. We will try "
+ "again in a short while to make sure."))
+ eventlet.sleep(CONF.zk.sg_retry_interval)
+ member = membership.Membership(self._session, path, member_id)
+ self._memberships[(group, member_id)] = member
+ return FakeLoopingCall(self, member_id, group)
+
+ def leave(self, member_id, group):
+ """Remove the given member from the service group."""
+ LOG.debug(_('ZooKeeperDriver.leave: %(member)s from group %(group)s'),
+ {'member': member_id, 'group': group})
+ try:
+ key = (group, member_id)
+ member = self._memberships[key]
+ member.leave()
+ del self._memberships[key]
+ except KeyError:
+ LOG.error(_('ZooKeeperDriver.leave: %(id)s has not joined to the '
+ '%(gr)s group'), {'id': member_id, 'gr': group})
+
+ def is_up(self, service_ref):
+ group_id = service_ref['topic']
+ member_id = service_ref['host']
+ all_members = self.get_all(group_id)
+ return member_id in all_members
+
+ def get_all(self, group_id):
+ """Return all members in a list, or a ServiceGroupUnavailable
+ exception.
+ """
+ monitor = self._monitors.get(group_id, None)
+ if monitor is None:
+ path = "%s/%s" % (CONF.zk.sg_prefix, group_id)
+ monitor = membership.MembershipMonitor(self._session, path)
+ self._monitors[group_id] = monitor
+            # Note(maoy): When initialized for the first time, it takes a
+            # while to retrieve all members from zookeeper. To prevent
+            # None from being returned, we sleep at most 5 seconds
+            # waiting for the data to be ready.
+ for _retry in range(50):
+ eventlet.sleep(0.1)
+ all_members = monitor.get_all()
+ if all_members is not None:
+ return all_members
+ all_members = monitor.get_all()
+ if all_members is None:
+ raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver")
+ return all_members
+
+
+class FakeLoopingCall(utils.LoopingCallBase):
+ """The fake Looping Call implementation, created for backward
+ compatibility with a membership based on DB.
+ """
+ def __init__(self, driver, host, group):
+ self._driver = driver
+ self._group = group
+ self._host = host
+
+ def stop(self):
+ self._driver.leave(self._host, self._group)
+
+ def start(self, interval, initial_delay=None):
+ pass
+
+ def wait(self):
+ pass
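get_all() papers over a startup race: a freshly created MembershipMonitor returns None until its first ZooKeeper fetch completes, so the driver polls for roughly five seconds before giving up. The waiting pattern in isolation (wait_for is an illustrative name):

    import eventlet

    def wait_for(getter, retries=50, interval=0.1):
        # Poll `getter` until it returns non-None, for at most
        # retries * interval seconds; hand back its last answer either way.
        for _retry in range(retries):
            eventlet.sleep(interval)
            result = getter()
            if result is not None:
                return result
        return getter()

    # The caller raises ServiceGroupUnavailable if this is still None.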
diff --git a/nova/test.py b/nova/test.py
index b3f851dc4..e5c11081c 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -37,9 +37,9 @@ import testtools
from nova import context
from nova import db
from nova.db import migration
-from nova.db.sqlalchemy import session
from nova.network import manager as network_manager
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import paths
@@ -56,8 +56,9 @@ test_opts = [
CONF = cfg.CONF
CONF.register_opts(test_opts)
-CONF.import_opt('sql_connection', 'nova.db.sqlalchemy.session')
-CONF.import_opt('sqlite_db', 'nova.db.sqlalchemy.session')
+CONF.import_opt('sql_connection',
+ 'nova.openstack.common.db.sqlalchemy.session')
+CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
CONF.set_override('use_stderr', False)
logging.setup('nova')
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index a00dceff1..f8219e7a0 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -269,7 +269,8 @@ class CloudTestCase(test.TestCase):
host=inst['host'],
vpn=None,
rxtx_factor=3,
- project_id=project_id)
+ project_id=project_id,
+ macs=None)
fixed_ips = nw_info.fixed_ips()
ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
@@ -712,10 +713,10 @@ class CloudTestCase(test.TestCase):
# Aggregate based zones
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
- db.aggregate_host_add(self.context, agg.id, 'host1_zones')
+ db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
agg = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
- db.aggregate_host_add(self.context, agg.id, 'host2_zones')
+ db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
result = self.cloud.describe_availability_zones(self.context)
self.assertEqual(len(result['availabilityZoneInfo']), 3)
admin_ctxt = context.get_admin_context(read_deleted="no")
@@ -737,7 +738,7 @@ class CloudTestCase(test.TestCase):
'report_count': 0})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'second_zone'})
- db.aggregate_host_add(self.context, agg.id, 'host2_zones')
+ db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
@@ -771,13 +772,13 @@ class CloudTestCase(test.TestCase):
'topic': "compute"})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
- db.aggregate_host_add(self.context, agg.id, 'host1')
+ db.aggregate_host_add(self.context, agg['id'], 'host1')
comp2 = db.service_create(self.context, {'host': 'host2',
'topic': "compute"})
agg2 = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
- db.aggregate_host_add(self.context, agg2.id, 'host2')
+ db.aggregate_host_add(self.context, agg2['id'], 'host2')
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
@@ -824,6 +825,19 @@ class CloudTestCase(test.TestCase):
self.cloud.describe_instances, self.context,
instance_id=[instance_id])
+ def test_describe_instances_with_filters(self):
+ # Makes sure describe_instances works and filters results.
+ filters = {'filter': [{'name': 'test',
+ 'value': ['a', 'b']},
+ {'name': 'another_test',
+ 'value': 'a string'}]}
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': []})
+
def test_describe_instances_sorting(self):
# Makes sure describe_instances works and is sorted as expected.
self.flags(use_ipv6=True)
diff --git a/nova/tests/api/openstack/compute/contrib/test_console_output.py b/nova/tests/api/openstack/compute/contrib/test_console_output.py
index d3f80b655..14b61abb7 100644
--- a/nova/tests/api/openstack/compute/contrib/test_console_output.py
+++ b/nova/tests/api/openstack/compute/contrib/test_console_output.py
@@ -35,6 +35,10 @@ def fake_get_console_output(self, _context, _instance, tail_length):
return '\n'.join(fixture)
+def fake_get_console_output_not_ready(self, _context, _instance, tail_length):
+ raise exception.InstanceNotReady(instance_id=_instance["uuid"])
+
+
def fake_get(self, context, instance_uuid):
return {'uuid': instance_uuid}
@@ -133,3 +137,15 @@ class ConsoleOutputExtensionTest(test.TestCase):
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
+
+ def test_get_console_output_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fake_get_console_output_not_ready)
+ body = {'os-getConsoleOutput': {'length': 3}}
+ req = webob.Request.blank('/v2/fake/servers/1/action')
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 409)
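
The new test pins the InstanceNotReady-to-409 mapping. A hedged sketch of the controller-side translation it exercises (the try/except shape is assumed, not quoted from the extension):

    import webob.exc

    from nova import exception

    def get_console_output(compute_api, context, instance, length):
        try:
            return compute_api.get_console_output(context, instance, length)
        except exception.InstanceNotReady as e:
            # Surface "not ready yet" as HTTP 409 Conflict, as asserted
            # by test_get_console_output_not_ready above.
            raise webob.exc.HTTPConflict(explanation=str(e))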
diff --git a/nova/tests/api/openstack/compute/contrib/test_evacuate.py b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
new file mode 100644
index 000000000..f76bf7bcf
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
@@ -0,0 +1,156 @@
+# Copyright 2013 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import webob
+
+from nova.compute import api as compute_api
+from nova.compute import vm_states
+from nova import context
+from nova.openstack.common import cfg
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+
+def fake_compute_api(*args, **kwargs):
+ return True
+
+
+def fake_compute_api_get(self, context, instance_id):
+ return {
+ 'id': 1,
+ 'uuid': instance_id,
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None, 'host': 'host1'
+ }
+
+
+class EvacuateTest(test.TestCase):
+
+ _methods = ('resize', 'evacuate')
+
+ def setUp(self):
+ super(EvacuateTest, self).setUp()
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+ self.UUID = uuid.uuid4()
+ for _method in self._methods:
+ self.stubs.Set(compute_api.API, _method, fake_compute_api)
+
+ def test_evacuate_instance_with_no_target(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_instance_with_target(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ uuid = self.UUID
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'host': 'my_host',
+ 'onSharedStorage': 'false',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+
+ def fake_update(inst, context, instance,
+ task_state, expected_task_state):
+ return None
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ resp = req.get_response(app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual("MyNewPass", resp_json['adminPass'])
+
+ def test_evacuate_shared_and_pass(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ uuid = self.UUID
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'host': 'my_host',
+ 'onSharedStorage': 'True',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+
+ def fake_update(inst, context, instance,
+ task_state, expected_task_state):
+ return None
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ res = req.get_response(app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_not_shared_pass_generated(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = True
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ uuid = self.UUID
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % uuid)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'host': 'my_host',
+ 'onSharedStorage': 'False',
+ }
+ })
+
+ req.content_type = 'application/json'
+
+ def fake_update(inst, context, instance,
+ task_state, expected_task_state):
+ return None
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ resp = req.get_response(app)
+ self.assertEqual(resp.status_int, 200)
+ resp_json = jsonutils.loads(resp.body)
+ self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
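
Taken together, the four tests document the evacuate action's request contract. A condensed sketch of the accepted and rejected bodies, with values taken from the tests above:

    # POST /v2/{tenant}/servers/{uuid}/action
    ok_explicit_pass = {'evacuate': {'host': 'my_host',
                                     'onSharedStorage': 'false',
                                     'adminPass': 'MyNewPass'}}      # 200

    ok_generated_pass = {'evacuate': {'host': 'my_host',
                                      'onSharedStorage': 'False'}}   # 200;
    # an adminPass of CONF.password_length is generated in the response.

    bad_no_target = {'evacuate': {'onSharedStorage': 'False',
                                  'adminPass': 'MyNewPass'}}         # 400

    bad_shared_with_pass = {'evacuate': {'host': 'my_host',
                                         'onSharedStorage': 'True',
                                         'adminPass': 'MyNewPass'}}  # 400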
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
new file mode 100644
index 000000000..8ebd810ac
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
@@ -0,0 +1,118 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import extended_availability_zone
+from nova import compute
+from nova import exception
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, host="host-fake")
+ inst['availability_zone'] = 'az-i'
+ return inst
+
+
+def fake_compute_get_all(*args, **kwargs):
+ inst1 = fakes.stub_instance(1, uuid=UUID1, host="host-1")
+ inst2 = fakes.stub_instance(2, uuid=UUID2, host="host-2")
+ inst1['availability_zone'] = 'az-i'
+ inst2['availability_zone'] = 'az-i'
+ return [inst1, inst2]
+
+
+class ExtendedServerAttributesTest(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-AZ:'
+
+ def setUp(self):
+ super(ExtendedServerAttributesTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_availability_zone'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def assertServerAttributes(self, server, az_instance, az_host):
+ self.assertEqual(server.get('%savailability_zone' % self.prefix),
+ az_instance)
+ self.assertEqual(server.get('%shost_availability_zone' % self.prefix),
+ az_host)
+
+ def test_show(self):
+ url = '/v2/fake/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerAttributes(self._get_server(res.body),
+ az_instance='az-i',
+ az_host='nova')
+
+ def test_detail(self):
+ url = '/v2/fake/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ self.assertServerAttributes(server,
+ az_instance='az-i',
+ az_host='nova')
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTest):
+ content_type = 'application/xml'
+ prefix = '{%s}' % extended_availability_zone.\
+ Extended_availability_zone.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
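
The JSON and XML test variants address the same extended attribute differently: a plain 'OS-EXT-AZ:' key prefix in JSON versus a Clark-notation namespace in XML. A short sketch of the two spellings, reading the namespace from the extension class as the XML test does:

    from nova.api.openstack.compute.contrib import extended_availability_zone

    json_key = 'OS-EXT-AZ:availability_zone'
    xml_key = ('{%s}availability_zone'
               % extended_availability_zone.Extended_availability_zone.namespace)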
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
index 0818dfdd3..1475edc0a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
@@ -252,35 +252,21 @@ class FlavorAccessTest(test.TestCase):
class FlavorAccessSerializerTest(test.TestCase):
- def test_xml_declaration(self):
- access_list = [{'flavor_id': '2', 'tenant_id': 'proj2'}]
- serializer = flavor_access.FlavorAccessTemplate()
- output = serializer.serialize(access_list)
- has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
- self.assertTrue(has_dec)
-
def test_serializer_empty(self):
- access_list = []
-
serializer = flavor_access.FlavorAccessTemplate()
- text = serializer.serialize(access_list)
+ text = serializer.serialize(dict(flavor_access=[]))
tree = etree.fromstring(text)
self.assertEqual(len(tree), 0)
def test_serializer(self):
+ expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<flavor_access>'
+ '<access tenant_id="proj2" flavor_id="2"/>'
+ '<access tenant_id="proj3" flavor_id="2"/>'
+ '</flavor_access>')
access_list = [{'flavor_id': '2', 'tenant_id': 'proj2'},
{'flavor_id': '2', 'tenant_id': 'proj3'}]
serializer = flavor_access.FlavorAccessTemplate()
- text = serializer.serialize(access_list)
- tree = etree.fromstring(text)
-
- self.assertEqual('flavor_access', tree.tag)
- self.assertEqual(len(access_list), len(tree))
-
- for i in range(len(access_list)):
- self.assertEqual('access', tree[i].tag)
- self.assertEqual(access_list[i]['flavor_id'],
- tree[i].get('flavor_id'))
- self.assertEqual(access_list[i]['tenant_id'],
- tree[i].get('tenant_id'))
+ text = serializer.serialize(dict(flavor_access=access_list))
+ self.assertEqual(text, expected)
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py
index bc9f66eb2..269937b82 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py
@@ -18,10 +18,10 @@
import webob
from nova.api.openstack.compute.contrib import flavorextraspecs
-from nova.api.openstack import wsgi
+import nova.db
+from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
-import nova.wsgi
def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
@@ -99,26 +99,47 @@ class FlavorsExtraSpecsTest(test.TestCase):
delete_flavor_extra_specs)
req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs' +
- '/key5')
+ '/key5', use_admin_context=True)
self.controller.delete(req, 1, 'key5')
+ def test_delete_no_admin(self):
+ self.stubs.Set(nova.db, 'instance_type_extra_specs_delete',
+ delete_flavor_extra_specs)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs' +
+ '/key5')
+ self.assertRaises(exception.NotAuthorized, self.controller.delete,
+ req, 1, 'key5')
+
def test_create(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
body = {"extra_specs": {"key1": "value1"}}
- req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs')
+ req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs',
+ use_admin_context=True)
res_dict = self.controller.create(req, 1, body)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
- def test_create_empty_body(self):
+ def test_create_no_admin(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
+ body = {"extra_specs": {"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs')
+ self.assertRaises(exception.NotAuthorized, self.controller.create,
+ req, 1, body)
+
+ def test_create_empty_body(self):
+ self.stubs.Set(nova.db,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs',
+ use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, 1, '')
@@ -129,18 +150,29 @@ class FlavorsExtraSpecsTest(test.TestCase):
body = {"key1": "value1"}
req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs' +
- '/key1')
+ '/key1', use_admin_context=True)
res_dict = self.controller.update(req, 1, 'key1', body)
self.assertEqual('value1', res_dict['key1'])
- def test_update_item_empty_body(self):
+ def test_update_item_no_admin(self):
self.stubs.Set(nova.db,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
+ body = {"key1": "value1"}
req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs' +
'/key1')
+ self.assertRaises(exception.NotAuthorized, self.controller.update,
+ req, 1, 'key1', body)
+
+ def test_update_item_empty_body(self):
+ self.stubs.Set(nova.db,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs' +
+ '/key1', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', '')
@@ -151,7 +183,7 @@ class FlavorsExtraSpecsTest(test.TestCase):
body = {"key1": "value1", "key2": "value2"}
req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs' +
- '/key1')
+ '/key1', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'key1', body)
@@ -161,7 +193,8 @@ class FlavorsExtraSpecsTest(test.TestCase):
return_create_flavor_extra_specs)
body = {"key1": "value1"}
- req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs/bad')
+ req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs/bad',
+ use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 1, 'bad', body)
@@ -172,13 +205,11 @@ class FlavorsExtraSpecsXMLSerializerTest(test.TestCase):
expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<extra_specs><key1>value1</key1></extra_specs>')
text = serializer.serialize(dict(extra_specs={"key1": "value1"}))
- print text
self.assertEqual(text, expected)
- def test_deserializer(self):
- deserializer = wsgi.XMLDeserializer()
- expected = dict(extra_specs={"key1": "value1"})
- intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<extra_specs><key1>value1</key1></extra_specs>')
- result = deserializer.deserialize(intext)['body']
- self.assertEqual(result, expected)
+ def test_show_update_serializer(self):
+ serializer = flavorextraspecs.ExtraSpecTemplate()
+ expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<extra_spec key="key1">value1</extra_spec>')
+ text = serializer.serialize(dict({"key1": "value1"}))
+ self.assertEqual(text, expected)
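
The *_no_admin tests all follow one shape: build the request without use_admin_context and expect NotAuthorized. A hedged sketch of a helper that would factor that out (the helper itself is hypothetical; fakes and exception are imports of the test module above):

    def assert_admin_only(testcase, method, *args):
        # A plain (non-admin) request context must be rejected before any
        # db stub is ever reached.
        req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs')
        testcase.assertRaises(exception.NotAuthorized, method, req, *args)

    # e.g. assert_admin_only(self, self.controller.delete, 1, 'key5')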
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py b/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py
index 4753ede32..ac636a4b5 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py
@@ -29,8 +29,10 @@ from nova.tests.api.openstack import fakes
name = "arbitraryname"
name2 = "anotherarbitraryname"
-testaddress = '10.0.0.66'
-testaddress2 = '10.0.0.67'
+test_ipv4_address = '10.0.0.66'
+test_ipv4_address2 = '10.0.0.67'
+
+test_ipv6_address = 'fe80:0:0:0:0:0:a00:42'
domain = "example.org"
domain2 = "example.net"
@@ -48,7 +50,7 @@ def _quote_domain(domain):
def network_api_get_floating_ip(self, context, id):
- return {'id': floating_ip_id, 'address': testaddress,
+ return {'id': floating_ip_id, 'address': test_ipv4_address,
'fixed_ip': None}
@@ -65,11 +67,11 @@ def network_get_dns_entries_by_address(self, context, address, domain):
def network_get_dns_entries_by_name(self, context, address, domain):
- return [testaddress]
+ return [test_ipv4_address]
def network_add_dns_entry(self, context, address, name, dns_type, domain):
- return {'dns_entry': {'ip': testaddress,
+ return {'dns_entry': {'ip': test_ipv4_address,
'name': name,
'type': dns_type,
'domain': domain}}
@@ -85,12 +87,16 @@ class FloatingIpDNSTest(test.TestCase):
def _create_floating_ip(self):
"""Create a floating ip object."""
host = "fake_host"
- return db.floating_ip_create(self.context,
- {'address': testaddress,
- 'host': host})
+ db.floating_ip_create(self.context,
+ {'address': test_ipv4_address,
+ 'host': host})
+ db.floating_ip_create(self.context,
+ {'address': test_ipv6_address,
+ 'host': host})
def _delete_floating_ip(self):
- db.floating_ip_destroy(self.context, testaddress)
+ db.floating_ip_destroy(self.context, test_ipv4_address)
+ db.floating_ip_destroy(self.context, test_ipv6_address)
def setUp(self):
super(FloatingIpDNSTest, self).setUp()
@@ -133,14 +139,17 @@ class FloatingIpDNSTest(test.TestCase):
self.assertFalse(entries[2]['project'])
self.assertEqual(entries[2]['availability_zone'], "avzone")
- def test_get_dns_entries_by_address(self):
- qparams = {'ip': testaddress}
- params = "?%s" % urllib.urlencode(qparams) if qparams else ""
- req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s/entries%s'
- % (_quote_domain(domain), params))
- entries = self.entry_controller.index(req, _quote_domain(domain))
+ def _test_get_dns_entries_by_address(self, address):
+ qparams = {'ip': address}
+ params = "?%s" % urllib.urlencode(qparams) if qparams else ""
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s'
+ % (_quote_domain(domain), params))
+ entries = self.entry_controller.show(req, _quote_domain(domain),
+ address)
+ entries = entries.obj
self.assertEqual(len(entries['dns_entries']), 2)
self.assertEqual(entries['dns_entries'][0]['name'],
name)
@@ -149,6 +158,35 @@ class FloatingIpDNSTest(test.TestCase):
self.assertEqual(entries['dns_entries'][0]['domain'],
domain)
+ def test_get_dns_entries_by_ipv4_address(self):
+ self._test_get_dns_entries_by_address(test_ipv4_address)
+
+ def test_get_dns_entries_by_ipv6_address(self):
+ self._test_get_dns_entries_by_address(test_ipv6_address)
+
+ def test_get_dns_entries_by_invalid_ipv4_or_ipv6(self):
+ # If the address is neither valid IPv4 nor IPv6, the 'show'
+ # method falls back to looking up dns entries by name. We use
+ # this to test that the address is correctly validated.
+ def fake_get_dns_entries_by_name(self, context, address, domain):
+ raise webob.exc.HTTPUnprocessableEntity()
+
+ self.stubs.Set(network.api.API, "get_dns_entries_by_name",
+ fake_get_dns_entries_by_name)
+
+ invalid_addr = '333.333.333.333'
+
+ qparams = {'ip': invalid_addr}
+ params = "?%s" % urllib.urlencode(qparams) if qparams else ""
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s'
+ % (_quote_domain(domain), params))
+
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.entry_controller.show,
+ req, _quote_domain(domain), invalid_addr)
+
def test_get_dns_entries_by_name(self):
req = fakes.HTTPRequest.blank(
'/v2/123/os-floating-ip-dns/%s/entries/%s' %
@@ -156,20 +194,34 @@ class FloatingIpDNSTest(test.TestCase):
entry = self.entry_controller.show(req, _quote_domain(domain), name)
self.assertEqual(entry['dns_entry']['ip'],
- testaddress)
+ test_ipv4_address)
self.assertEqual(entry['dns_entry']['domain'],
domain)
+ def test_dns_entries_not_found(self):
+ def fake_get_dns_entries_by_name(self, context, address, domain):
+ raise webob.exc.HTTPNotFound()
+
+ self.stubs.Set(network.api.API, "get_dns_entries_by_name",
+ fake_get_dns_entries_by_name)
+
+ req = fakes.HTTPRequest.blank(
+ '/v2/123/os-floating-ip-dns/%s/entries/%s' %
+ (_quote_domain(domain), 'nonexistent'))
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.entry_controller.show,
+ req, _quote_domain(domain), 'nonexistent')
+
def test_create_entry(self):
body = {'dns_entry':
- {'ip': testaddress,
+ {'ip': test_ipv4_address,
'dns_type': 'A'}}
req = fakes.HTTPRequest.blank(
'/v2/123/os-floating-ip-dns/%s/entries/%s' %
(_quote_domain(domain), name))
entry = self.entry_controller.update(req, _quote_domain(domain),
name, body)
- self.assertEqual(entry['dns_entry']['ip'], testaddress)
+ self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address)
def test_create_domain(self):
req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
@@ -264,13 +316,13 @@ class FloatingIpDNSTest(test.TestCase):
def test_modify(self):
body = {'dns_entry':
- {'ip': testaddress2,
+ {'ip': test_ipv4_address2,
'dns_type': 'A'}}
req = fakes.HTTPRequest.blank(
'/v2/123/os-floating-ip-dns/%s/entries/%s' % (domain, name))
entry = self.entry_controller.update(req, domain, name, body)
- self.assertEqual(entry['dns_entry']['ip'], testaddress2)
+ self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address2)
class FloatingIpDNSSerializerTest(test.TestCase):
@@ -305,11 +357,11 @@ class FloatingIpDNSSerializerTest(test.TestCase):
serializer = floating_ip_dns.FloatingIPDNSsTemplate()
text = serializer.serialize(dict(
dns_entries=[
- dict(ip=testaddress,
+ dict(ip=test_ipv4_address,
type='A',
domain=domain,
name=name),
- dict(ip=testaddress2,
+ dict(ip=test_ipv4_address2,
type='C',
domain=domain,
name=name2)]))
@@ -319,11 +371,11 @@ class FloatingIpDNSSerializerTest(test.TestCase):
self.assertEqual(2, len(tree))
self.assertEqual('dns_entry', tree[0].tag)
self.assertEqual('dns_entry', tree[1].tag)
- self.assertEqual(testaddress, tree[0].get('ip'))
+ self.assertEqual(test_ipv4_address, tree[0].get('ip'))
self.assertEqual('A', tree[0].get('type'))
self.assertEqual(domain, tree[0].get('domain'))
self.assertEqual(name, tree[0].get('name'))
- self.assertEqual(testaddress2, tree[1].get('ip'))
+ self.assertEqual(test_ipv4_address2, tree[1].get('ip'))
self.assertEqual('C', tree[1].get('type'))
self.assertEqual(domain, tree[1].get('domain'))
self.assertEqual(name2, tree[1].get('name'))
@@ -332,7 +384,7 @@ class FloatingIpDNSSerializerTest(test.TestCase):
serializer = floating_ip_dns.FloatingIPDNSTemplate()
text = serializer.serialize(dict(
dns_entry=dict(
- ip=testaddress,
+ ip=test_ipv4_address,
type='A',
domain=domain,
name=name)))
@@ -340,6 +392,6 @@ class FloatingIpDNSSerializerTest(test.TestCase):
tree = etree.fromstring(text)
self.assertEqual('dns_entry', tree.tag)
- self.assertEqual(testaddress, tree.get('ip'))
+ self.assertEqual(test_ipv4_address, tree.get('ip'))
self.assertEqual(domain, tree.get('domain'))
self.assertEqual(name, tree.get('name'))
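
The show() fallback tested above hinges on distinguishing IP literals from names. A standalone sketch of that validation, stdlib only and not the helper nova itself uses:

    import socket

    def is_ip_address(value):
        # Accept either an IPv4 or an IPv6 literal; anything else is
        # treated as a DNS name, which is why '333.333.333.333' falls
        # through to the by-name lookup in the test.
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                socket.inet_pton(family, value)
                return True
            except socket.error:
                pass
        return False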
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
index efc9b36cc..864ab7a9f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
@@ -27,7 +27,6 @@ from nova import db
from nova import exception
from nova import network
from nova.openstack.common import jsonutils
-from nova.openstack.common import rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
@@ -36,12 +35,6 @@ from nova.tests import fake_network
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-def network_api_get_fixed_ip(self, context, id):
- if id is None:
- return None
- return {'address': '10.0.0.1', 'id': id, 'instance_uuid': 1}
-
-
def network_api_get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
'fixed_ip_id': None}
@@ -56,11 +49,12 @@ def network_api_get_floating_ips_by_project(self, context):
return [{'id': 1,
'address': '10.10.10.10',
'pool': 'nova',
- 'fixed_ip_id': 20},
+ 'fixed_ip': {'address': '10.0.0.1',
+ 'instance': {'uuid': FAKE_UUID}}},
{'id': 2,
'pool': 'nova', 'interface': 'eth0',
'address': '10.10.10.11',
- 'fixed_ip_id': None}]
+ 'fixed_ip': None}]
def compute_api_get(self, context, instance_id):
@@ -131,8 +125,6 @@ class FloatingIpTest(test.TestCase):
def setUp(self):
super(FloatingIpTest, self).setUp()
- self.stubs.Set(network.api.API, "get_fixed_ip",
- network_api_get_fixed_ip)
self.stubs.Set(compute.api.API, "get",
compute_api_get)
self.stubs.Set(network.api.API, "get_floating_ip",
@@ -173,8 +165,9 @@ class FloatingIpTest(test.TestCase):
floating_ip_address = self.floating_ip
floating_ip = db.floating_ip_get_by_address(self.context,
floating_ip_address)
- floating_ip['fixed_ip'] = None
- floating_ip['instance'] = None
+ # NOTE(vish): floating_ip_get uses the id, not the address
+ floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
+ self.controller._normalize_ip(floating_ip)
view = floating_ips._translate_floating_ip_view(floating_ip)
self.assertTrue('floating_ip' in view)
self.assertTrue(view['floating_ip']['id'])
@@ -185,6 +178,7 @@ class FloatingIpTest(test.TestCase):
def test_translate_floating_ip_view_dict(self):
floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
'fixed_ip': None}
+ self.controller._normalize_ip(floating_ip)
view = floating_ips._translate_floating_ip_view(floating_ip)
self.assertTrue('floating_ip' in view)
@@ -245,19 +239,17 @@ class FloatingIpTest(test.TestCase):
def test_show_associated_floating_ip(self):
def get_floating_ip(self, context, id):
return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
- 'fixed_ip_id': 11}
-
- def get_fixed_ip(self, context, id):
- return {'address': '10.0.0.1', 'instance_uuid': 1}
+ 'fixed_ip': {'address': '10.0.0.1',
+ 'instance': {'uuid': FAKE_UUID}}}
self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
- self.stubs.Set(network.api.API, "get_fixed_ip", get_fixed_ip)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(res_dict['floating_ip']['id'], 1)
self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
+ self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
def test_recreation_of_floating_ip(self):
@@ -282,12 +274,11 @@ class FloatingIpTest(test.TestCase):
self.assertIn(self.floating_ip, ip_list)
self.assertNotIn(self.floating_ip_2, ip_list)
-# test floating ip allocate/release(deallocate)
def test_floating_ip_allocate_no_free_ips(self):
- def fake_call(*args, **kwargs):
+ def fake_allocate(*args, **kwargs):
raise exception.NoMoreFloatingIps()
- self.stubs.Set(rpc, "call", fake_call)
+ self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
self.assertRaises(exception.NoMoreFloatingIps,
@@ -323,9 +314,12 @@ class FloatingIpTest(test.TestCase):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
self.controller.delete(req, 1)
-# test floating ip add/remove -> associate/disassociate
-
def test_floating_ip_associate(self):
+ def fake_associate_floating_ip(*args, **kwargs):
+ pass
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
body = dict(addFloatingIp=dict(address=self.floating_ip))
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
diff --git a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
index 4e4d214cc..f93c64487 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
@@ -267,7 +267,7 @@ class HypervisorsTest(test.TestCase):
def fake_get_host_uptime(context, hyp):
raise exc.HTTPNotImplemented()
- self.stubs.Set(self.controller.api, 'get_host_uptime',
+ self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
@@ -278,7 +278,7 @@ class HypervisorsTest(test.TestCase):
def fake_get_host_uptime(context, hyp):
return "fake uptime"
- self.stubs.Set(self.controller.api, 'get_host_uptime',
+ self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
index 4ada22a17..920fd86d5 100644
--- a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
+++ b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
@@ -80,11 +80,8 @@ TEST_LOGS3 = [
]
-def fake_service_get_all(context):
- return TEST_COMPUTE_SERVICES
-
-
-def fake_task_log_get_all(context, task_name, begin, end):
+def fake_task_log_get_all(context, task_name, begin, end,
+ host=None, state=None):
assert task_name == "instance_usage_audit"
if begin == begin1 and end == end1:
@@ -114,13 +111,18 @@ class InstanceUsageAuditLogTest(test.TestCase):
self.context = context.get_admin_context()
timeutils.set_time_override(datetime.datetime(2012, 7, 5, 10, 0, 0))
self.controller = ial.InstanceUsageAuditLogController()
+ self.host_api = self.controller.host_api
+
+ def fake_service_get_all(context, disabled):
+ self.assertTrue(disabled is None)
+ return TEST_COMPUTE_SERVICES
self.stubs.Set(utils, 'last_completed_audit_period',
fake_last_completed_audit_period)
self.stubs.Set(db, 'service_get_all',
- fake_service_get_all)
+ fake_service_get_all)
self.stubs.Set(db, 'task_log_get_all',
- fake_task_log_get_all)
+ fake_task_log_get_all)
def tearDown(self):
super(InstanceUsageAuditLogTest, self).tearDown()
diff --git a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
index a72f5bf0f..0c1378a67 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
@@ -138,7 +138,6 @@ class QuotaTemplateXMLSerializerTest(test.TestCase):
cores=90))
text = self.serializer.serialize(exemplar)
- print text
tree = etree.fromstring(text)
self.assertEqual('quota_class_set', tree.tag)
diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py
index dab8c136e..8d518b815 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quotas.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py
@@ -166,7 +166,6 @@ class QuotaXMLSerializerTest(test.TestCase):
cores=90))
text = self.serializer.serialize(exemplar)
- print text
tree = etree.fromstring(text)
self.assertEqual('quota_set', tree.tag)
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index 58f1862c3..75da8efa5 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -1212,7 +1212,6 @@ class TestSecurityGroupXMLSerializer(test.TestCase):
rule = dict(security_group_rule=raw_rule)
text = self.rule_serializer.serialize(rule)
- print text
tree = etree.fromstring(text)
self.assertEqual('security_group_rule', self._tag(tree))
@@ -1244,7 +1243,6 @@ class TestSecurityGroupXMLSerializer(test.TestCase):
sg_group = dict(security_group=raw_group)
text = self.default_serializer.serialize(sg_group)
- print text
tree = etree.fromstring(text)
self._verify_security_group(raw_group, tree)
@@ -1297,7 +1295,6 @@ class TestSecurityGroupXMLSerializer(test.TestCase):
sg_groups = dict(security_groups=groups)
text = self.index_serializer.serialize(sg_groups)
- print text
tree = etree.fromstring(text)
self.assertEqual('security_groups', self._tag(tree))
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
index ea4565e14..783275ea2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
@@ -74,7 +74,6 @@ class TestServerDiagnosticsXMLSerializer(test.TestCase):
exemplar = dict(diag1='foo', diag2='bar')
text = serializer.serialize(exemplar)
- print text
tree = etree.fromstring(text)
self.assertEqual('diagnostics', self._tag(tree))
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_password.py b/nova/tests/api/openstack/compute/contrib/test_server_password.py
index 600c4eda4..87da90efe 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_password.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_password.py
@@ -40,11 +40,12 @@ class ServerPasswordTest(test.TestCase):
def fake_extract_password(instance):
return self.password
- def fake_set_password(context, instance_uuid, password):
+ def fake_convert_password(context, password):
self.password = password
+ return {}
self.stubs.Set(password, 'extract_password', fake_extract_password)
- self.stubs.Set(password, 'set_password', fake_set_password)
+ self.stubs.Set(password, 'convert_password', fake_convert_password)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
index 554379e74..eb708a574 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
@@ -17,6 +17,7 @@ import webob
from nova.api.openstack.compute.contrib import server_start_stop
from nova.compute import api as compute_api
+from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
@@ -25,6 +26,10 @@ def fake_compute_api_get(self, context, instance_id):
return {'id': 1, 'uuid': instance_id}
+def fake_start_stop_not_ready(self, context, instance):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
class ServerStartStopTest(test.TestCase):
def setUp(self):
@@ -41,6 +46,14 @@ class ServerStartStopTest(test.TestCase):
body = dict(start="")
self.controller._start_server(req, 'test_inst', body)
+ def test_start_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, 'test_inst', body)
+
def test_stop(self):
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
self.mox.StubOutWithMock(compute_api.API, 'stop')
@@ -51,6 +64,14 @@ class ServerStartStopTest(test.TestCase):
body = dict(stop="")
self.controller._stop_server(req, 'test_inst', body)
+ def test_stop_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, 'test_inst', body)
+
def test_start_with_bogus_id(self):
req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
body = dict(start="")
diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
index b49a1feb4..440c97fbd 100644
--- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
@@ -65,7 +65,8 @@ def get_fake_db_instance(start, end, instance_id, tenant_id):
'terminated_at': end}
-def fake_instance_get_active_by_window(self, context, begin, end, project_id):
+def fake_instance_get_active_by_window_joined(self, context, begin, end,
+ project_id):
return [get_fake_db_instance(START,
STOP,
x,
@@ -79,7 +80,7 @@ class SimpleTenantUsageTest(test.TestCase):
self.stubs.Set(api.API, "get_instance_type",
fake_instance_type_get)
self.stubs.Set(api.API, "get_active_by_window",
- fake_instance_get_active_by_window)
+ fake_instance_get_active_by_window_joined)
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
@@ -293,7 +294,6 @@ class SimpleTenantUsageSerializerTest(test.TestCase):
tenant_usage = dict(tenant_usage=raw_usage)
text = serializer.serialize(tenant_usage)
- print text
tree = etree.fromstring(text)
self._verify_tenant_usage(raw_usage, tree)
@@ -378,7 +378,6 @@ class SimpleTenantUsageSerializerTest(test.TestCase):
tenant_usages = dict(tenant_usages=raw_usages)
text = serializer.serialize(tenant_usages)
- print text
tree = etree.fromstring(text)
self.assertEqual('tenant_usages', tree.tag)
diff --git a/nova/tests/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
index a223178fb..fa0c521fe 100644
--- a/nova/tests/api/openstack/compute/contrib/test_snapshots.py
+++ b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
@@ -271,7 +271,6 @@ class SnapshotSerializerTest(test.TestCase):
)
text = serializer.serialize(dict(snapshot=raw_snapshot))
- print text
tree = etree.fromstring(text)
self._verify_snapshot(raw_snapshot, tree)
@@ -298,7 +297,6 @@ class SnapshotSerializerTest(test.TestCase):
)]
text = serializer.serialize(dict(snapshots=raw_snapshots))
- print text
tree = etree.fromstring(text)
self.assertEqual('snapshots', tree.tag)
diff --git a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py
index 7c61cd51b..cf1c1593f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py
+++ b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py
@@ -91,7 +91,6 @@ class ServerVirtualInterfaceSerializerTest(test.TestCase):
vifs = dict(virtual_interfaces=raw_vifs)
text = self.serializer.serialize(vifs)
- print text
tree = etree.fromstring(text)
self.assertEqual('virtual_interfaces', self._tag(tree))
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
index 3119f55e8..1a8a570e8 100644
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -348,7 +348,6 @@ class VolumeSerializerTest(test.TestCase):
device='/foo')
text = serializer.serialize(dict(volumeAttachment=raw_attach))
- print text
tree = etree.fromstring(text)
self.assertEqual('volumeAttachment', tree.tag)
@@ -368,7 +367,6 @@ class VolumeSerializerTest(test.TestCase):
device='/foo2')]
text = serializer.serialize(dict(volumeAttachments=raw_attaches))
- print text
tree = etree.fromstring(text)
self.assertEqual('volumeAttachments', tree.tag)
@@ -401,7 +399,6 @@ class VolumeSerializerTest(test.TestCase):
)
text = serializer.serialize(dict(volume=raw_volume))
- print text
tree = etree.fromstring(text)
self._verify_volume(raw_volume, tree)
@@ -450,7 +447,6 @@ class VolumeSerializerTest(test.TestCase):
)]
text = serializer.serialize(dict(volumes=raw_volumes))
- print text
tree = etree.fromstring(text)
self.assertEqual('volumes', tree.tag)
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index 485968209..a52b0e0fc 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -166,6 +166,8 @@ class ExtensionControllerTest(ExtensionTestCase):
"Createserverext",
"DeferredDelete",
"DiskConfig",
+ "ExtendedAvailabilityZone",
+ "Evacuate",
"ExtendedStatus",
"ExtendedServerAttributes",
"FixedIPs",
diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py
index 375355a70..e3fff380d 100644
--- a/nova/tests/api/openstack/compute/test_limits.py
+++ b/nova/tests/api/openstack/compute/test_limits.py
@@ -874,7 +874,6 @@ class LimitsXMLSerializationTest(test.TestCase):
"absolute": {}}}
output = serializer.serialize(fixture)
- print output
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
@@ -905,7 +904,6 @@ class LimitsXMLSerializationTest(test.TestCase):
"maxPersonalitySize": 10240}}}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'limits')
@@ -940,7 +938,6 @@ class LimitsXMLSerializationTest(test.TestCase):
"absolute": {}}}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'limits')
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 5456c23af..4bfb1c1e3 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -4508,7 +4508,6 @@ class ServerXMLSerializationTest(test.TestCase):
}
output = serializer.serialize(fixture)
- print output
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
@@ -4586,7 +4585,6 @@ class ServerXMLSerializationTest(test.TestCase):
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'server')
@@ -4717,7 +4715,6 @@ class ServerXMLSerializationTest(test.TestCase):
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'server')
@@ -4814,7 +4811,6 @@ class ServerXMLSerializationTest(test.TestCase):
]}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'servers_index')
server_elems = root.findall('{0}server'.format(NS))
@@ -4878,7 +4874,6 @@ class ServerXMLSerializationTest(test.TestCase):
]}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'servers_index')
server_elems = root.findall('{0}server'.format(NS))
@@ -5165,7 +5160,6 @@ class ServerXMLSerializationTest(test.TestCase):
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'server')
diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py
index 28b109215..bd2e9fa7b 100644
--- a/nova/tests/api/openstack/compute/test_versions.py
+++ b/nova/tests/api/openstack/compute/test_versions.py
@@ -228,7 +228,6 @@ class VersionsTest(test.TestCase):
self.assertEqual(res.content_type, "application/xml")
root = etree.XML(res.body)
- print res.body
xmlutil.validate_schema(root, 'versions')
self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 03fc87ac5..3ef98b902 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -372,7 +372,7 @@ def create_info_cache(nw_cache):
def get_fake_uuid(token=0):
- if not token in FAKE_UUIDS:
+ if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = str(uuid.uuid4())
return FAKE_UUIDS[token]
@@ -399,12 +399,12 @@ def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
server = stub_instance(id=i + 1, uuid=uuid,
**kwargs)
servers_list.append(server)
- if not marker is None and uuid == marker:
+ if marker is not None and uuid == marker:
found_marker = True
servers_list = []
- if not marker is None and not found_marker:
+ if marker is not None and not found_marker:
raise exc.MarkerNotFound(marker=marker)
- if not limit is None:
+ if limit is not None:
servers_list = servers_list[:limit]
return servers_list
return _return_servers
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 7e49e4ab8..68a5f0bf4 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -377,7 +377,6 @@ class MetadataXMLSerializationTest(test.TestCase):
}
output = serializer.serialize(fixture)
- print output
has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
self.assertTrue(has_dec)
@@ -390,7 +389,6 @@ class MetadataXMLSerializationTest(test.TestCase):
},
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
@@ -409,7 +407,6 @@ class MetadataXMLSerializationTest(test.TestCase):
},
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
@@ -428,7 +425,6 @@ class MetadataXMLSerializationTest(test.TestCase):
},
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
@@ -447,7 +443,6 @@ class MetadataXMLSerializationTest(test.TestCase):
},
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
meta_dict = fixture['meta']
(meta_key, meta_value) = meta_dict.items()[0]
@@ -463,7 +458,6 @@ class MetadataXMLSerializationTest(test.TestCase):
},
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
@@ -482,7 +476,6 @@ class MetadataXMLSerializationTest(test.TestCase):
},
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
meta_dict = fixture['meta']
(meta_key, meta_value) = meta_dict.items()[0]
@@ -499,7 +492,6 @@ class MetadataXMLSerializationTest(test.TestCase):
},
}
output = serializer.serialize(fixture)
- print output
root = etree.XML(output)
xmlutil.validate_schema(root, 'metadata')
metadata_dict = fixture['metadata']
diff --git a/nova/tests/baremetal/db/test_bm_interface.py b/nova/tests/baremetal/db/test_bm_interface.py
index 9f051ac9b..32beb1ce0 100644
--- a/nova/tests/baremetal/db/test_bm_interface.py
+++ b/nova/tests/baremetal/db/test_bm_interface.py
@@ -18,6 +18,7 @@ Bare-metal DB testcase for BareMetalInterface
"""
from nova import exception
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.tests.baremetal.db import base
from nova.virt.baremetal import db
@@ -27,7 +28,7 @@ class BareMetalInterfaceTestCase(base.BMDBTestCase):
def test_unique_address(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
- self.assertRaises(exception.DBError,
+ self.assertRaises(db_session.DBError,
db.bm_interface_create,
self.context, 2, '11:11:11:11:11:11', '0x2', 2)
# succeed after delete pif1
diff --git a/nova/tests/baremetal/db/test_bm_pxe_ip.py b/nova/tests/baremetal/db/test_bm_pxe_ip.py
index 9a93b46ad..9820f3af0 100644
--- a/nova/tests/baremetal/db/test_bm_pxe_ip.py
+++ b/nova/tests/baremetal/db/test_bm_pxe_ip.py
@@ -18,6 +18,7 @@ Bare-metal DB testcase for BareMetalPxeIp
"""
from nova import exception
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.tests.baremetal.db import base
from nova.tests.baremetal.db import utils
from nova.virt.baremetal import db
@@ -50,14 +51,14 @@ class BareMetalPxeIpTestCase(base.BMDBTestCase):
# address duplicates
i = utils.new_bm_pxe_ip(address='10.1.1.1',
server_address='10.1.1.201')
- self.assertRaises(exception.DBError,
+ self.assertRaises(db_session.DBError,
db.bm_pxe_ip_create_direct,
self.context, i)
# server_address duplicates
i = utils.new_bm_pxe_ip(address='10.1.1.3',
server_address='10.1.1.101')
- self.assertRaises(exception.DBError,
+ self.assertRaises(db_session.DBError,
db.bm_pxe_ip_create_direct,
self.context, i)
diff --git a/nova/tests/baremetal/test_nova_baremetal_manage.py b/nova/tests/baremetal/test_nova_baremetal_manage.py
index 4d152a028..c4fdaac6b 100644
--- a/nova/tests/baremetal/test_nova_baremetal_manage.py
+++ b/nova/tests/baremetal/test_nova_baremetal_manage.py
@@ -20,10 +20,6 @@ import imp
import os
import sys
-from nova import context
-from nova import test
-from nova.virt.baremetal import db as bmdb
-
from nova.tests.baremetal.db import base as bm_db_base
TOPDIR = os.path.normpath(os.path.join(
diff --git a/nova/tests/baremetal/test_pxe.py b/nova/tests/baremetal/test_pxe.py
index dafa9bab7..9703feb40 100644
--- a/nova/tests/baremetal/test_pxe.py
+++ b/nova/tests/baremetal/test_pxe.py
@@ -21,12 +21,11 @@
import os
-import mox
from testtools import matchers
from nova import exception
from nova.openstack.common import cfg
-from nova import test
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
@@ -70,7 +69,7 @@ class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
self.node_info = bm_db_utils.new_bm_node(
id=123,
service_host='test_host',
- cpus=2,
+ cpus=4,
memory_mb=2048,
prov_mac_address='11:11:11:11:11:11',
)
@@ -222,19 +221,20 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
pxe.get_deploy_ari_id(self.instance), 'bbbb')
def test_get_partition_sizes(self):
- # m1.tiny: 10GB root, 0GB swap
- self.instance['instance_type_id'] = 1
+ # default "kinda.big" instance
sizes = pxe.get_partition_sizes(self.instance)
- self.assertEqual(sizes[0], 10240)
- self.assertEqual(sizes[1], 1)
+ self.assertEqual(sizes[0], 40960)
+ self.assertEqual(sizes[1], 1024)
+
+ def test_swap_not_zero(self):
+ # override swap to 0
+ instance_type = utils.get_test_instance_type(self.context)
+ instance_type['swap'] = 0
+ self.instance = utils.get_test_instance(self.context, instance_type)
- # kinda.big: 40GB root, 1GB swap
- ref = utils.get_test_instance_type()
- self.instance['instance_type_id'] = ref['id']
- self.instance['root_gb'] = ref['root_gb']
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
- self.assertEqual(sizes[1], 1024)
+ self.assertEqual(sizes[1], 1)
def test_get_tftp_image_info(self):
# Raises an exception when options are neither specified
@@ -523,7 +523,7 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
AndRaise(exception.NovaException)
bm_utils.unlink_without_raise(pxe_path)
self.driver._collect_mac_addresses(self.context, self.node).\
- AndRaise(exception.DBError)
+ AndRaise(db_session.DBError)
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
self.mox.ReplayAll()
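
The updated partition-size expectations encode a simple contract: the root partition is root_gb expressed in MB, and swap is floored at 1 MB so a swap partition always exists. A minimal sketch of that rule, paraphrasing the behavior the tests assert rather than the pxe code itself:

    def partition_sizes(root_gb, swap_mb):
        # 40 GB root -> 40960 MB; swap of 0 still yields a 1 MB partition,
        # matching test_get_partition_sizes and test_swap_not_zero above.
        return (root_gb * 1024, max(swap_mb, 1))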
diff --git a/nova/tests/baremetal/test_utils.py b/nova/tests/baremetal/test_utils.py
index 2615a26cb..df5112deb 100644
--- a/nova/tests/baremetal/test_utils.py
+++ b/nova/tests/baremetal/test_utils.py
@@ -18,12 +18,9 @@
"""Tests for baremetal utils."""
-import mox
-
import errno
import os
-from nova import exception
from nova import test
from nova.virt.baremetal import utils
diff --git a/nova/tests/baremetal/test_virtual_power_driver.py b/nova/tests/baremetal/test_virtual_power_driver.py
new file mode 100644
index 000000000..a63d8add1
--- /dev/null
+++ b/nova/tests/baremetal/test_virtual_power_driver.py
@@ -0,0 +1,358 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding=utf-8
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for baremetal virtual power driver."""
+
+import mox
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova import utils as nutils
+
+from nova.tests.baremetal.db import base as bm_db_base
+from nova.tests.baremetal.db import utils as bm_db_utils
+from nova.tests.image import fake as fake_image
+from nova.tests import utils
+
+from nova.virt.baremetal import db
+from nova.virt.baremetal import virtual_power_driver
+import nova.virt.powervm.common as connection
+
+CONF = cfg.CONF
+
+COMMON_FLAGS = dict(
+ firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
+ host='test_host',
+)
+
+BAREMETAL_FLAGS = dict(
+ driver='nova.virt.baremetal.pxe.PXE',
+ instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
+ power_manager=
+ 'nova.virt.baremetal.virtual_power_driver.VirtualPowerManager',
+ vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
+ volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
+ virtual_power_ssh_host=None,
+ virtual_power_type='vbox',
+ virtual_power_host_user=None,
+ virtual_power_host_pass=None,
+ group='baremetal',
+)
+
+
+class BareMetalVPDTestCase(bm_db_base.BMDBTestCase):
+
+ def setUp(self):
+ super(BareMetalVPDTestCase, self).setUp()
+ self.flags(**COMMON_FLAGS)
+ self.flags(**BAREMETAL_FLAGS)
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.context = utils.get_test_admin_context()
+ self.test_block_device_info = None
+ self.instance = utils.get_test_instance()
+ self.test_network_info = utils.get_test_network_info()
+ self.node_info = bm_db_utils.new_bm_node(
+ id=123,
+ service_host='test_host',
+ cpus=2,
+ memory_mb=2048,
+ prov_mac_address='11:11:11:11:11:11',
+ )
+ self.nic_info = [
+ {'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
+ 'port_no': 1},
+ {'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
+ 'port_no': 2},
+ ]
+ self.addCleanup(fake_image.FakeImageService_reset)
+
+ def _create_node(self):
+ self.node = db.bm_node_create(self.context, self.node_info)
+ for nic in self.nic_info:
+ db.bm_interface_create(
+ self.context,
+ self.node['id'],
+ nic['address'],
+ nic['datapath_id'],
+ nic['port_no'],
+ )
+ self.instance['node'] = self.node['id']
+
+ def _create_pm(self):
+ self.pm = virtual_power_driver.VirtualPowerManager(
+ node=self.node,
+ instance=self.instance)
+ return self.pm
+
+
+class VPDClassMethodsTestCase(BareMetalVPDTestCase):
+
+ def test_get_conn_missing_options(self):
+ self.flags(virtual_power_ssh_host=None, group="baremetal")
+ self.flags(virtual_power_host_user=None, group="baremetal")
+ self.flags(virtual_power_host_pass=None, group="baremetal")
+ self._create_node()
+ self._create_pm()
+ self._conn = None
+ self.assertRaises(exception.NovaException,
+ self.pm._get_conn)
+ self._conn = None
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.assertRaises(exception.NovaException,
+ self.pm._get_conn)
+ self._conn = None
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.assertRaises(exception.NovaException,
+ self.pm._get_conn)
+
+ def test_get_conn_success(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+ self._conn = self.pm._get_conn()
+ self.mox.StubOutWithMock(connection, 'ssh_connect')
+ connection.ssh_connect(mox.IsA(self._conn)).AndReturn(True)
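+ # _set_connection should build connection data from the flags
+ # above and hand it to ssh_connect exactly once.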
+ self.mox.ReplayAll()
+ self.pm._set_connection()
+ self.assertEqual(self.pm.connection_data.host, '127.0.0.1')
+ self.assertEqual(self.pm.connection_data.username, 'user')
+ self.assertEqual(self.pm.connection_data.password, 'password')
+ self.mox.VerifyAll()
+
+ def test_get_full_node_list(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ cmd = self.pm._vp_cmd.list_cmd
+ self.pm._run_command(cmd).AndReturn("testNode")
+
+ self.mox.ReplayAll()
+ name = self.pm._get_full_node_list()
+ self.assertEqual(name, 'testNode')
+ self.mox.VerifyAll()
+
+ def test_check_for_node(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_get_full_node_list')
+ self.pm._get_full_node_list().\
+ AndReturn(["testNode"])
+
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ cmd = self.pm._vp_cmd.get_node_macs.replace('{_NodeName_}', 'testNode')
+ self.pm._run_command(cmd).\
+ AndReturn(["111111111111", "ffeeddccbbaa"])
+
+ self.mox.ReplayAll()
+ name = self.pm._check_for_node()
+ self.assertEqual(name, 'testNode')
+ self.mox.VerifyAll()
+
+ def test_check_for_node_not_found(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_get_full_node_list')
+ self.pm._get_full_node_list().AndReturn(["testNode"])
+
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ cmd = self.pm._vp_cmd.get_node_macs.replace('{_NodeName_}', 'testNode')
+ self.pm._run_command(cmd).AndReturn(["aabbccddeeff", "ffeeddccbbaa"])
+
+ self.mox.ReplayAll()
+ name = self.pm._check_for_node()
+ self.assertEqual(name, '')
+ self.mox.VerifyAll()
+
+ def test_activate_node(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ self.mox.StubOutWithMock(self.pm, 'is_power_on')
+ self.pm._check_for_node().AndReturn("testNode")
+ self.pm._run_command(self.pm._vp_cmd.start_cmd).AndReturn("Started")
+ self.pm.is_power_on().AndReturn(True)
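+ # activate_node should report 'active' only once the node is
+ # confirmed powered on.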
+ self.mox.ReplayAll()
+ state = self.pm.activate_node()
+ self.assertEqual(state, 'active')
+ self.mox.VerifyAll()
+
+ def test_activate_node_fail(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ self.mox.StubOutWithMock(self.pm, 'is_power_on')
+ self.pm._check_for_node().AndReturn("testNode")
+ self.pm._run_command(self.pm._vp_cmd.start_cmd).AndReturn("Started")
+ self.pm.is_power_on().AndReturn(False)
+ self.mox.ReplayAll()
+ state = self.pm.activate_node()
+ self.assertEqual(state, 'error')
+ self.mox.VerifyAll()
+
+ def test_deactivate_node(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ self.mox.StubOutWithMock(self.pm, 'is_power_on')
+ self.pm._check_for_node().AndReturn("testNode")
+ self.pm.is_power_on().AndReturn(True)
+ self.pm._run_command(self.pm._vp_cmd.stop_cmd).AndReturn("Stopped")
+ self.pm.is_power_on().AndReturn(False)
+ self.mox.ReplayAll()
+ state = self.pm.deactivate_node()
+ self.assertEqual(state, 'deleted')
+ self.mox.VerifyAll()
+
+ def test_deactivate_node_fail(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ self.mox.StubOutWithMock(self.pm, 'is_power_on')
+ self.pm._check_for_node().AndReturn("testNode")
+ self.pm.is_power_on().AndReturn(True)
+ self.pm._run_command(self.pm._vp_cmd.stop_cmd).AndReturn("Stopped")
+ self.pm.is_power_on().AndReturn(True)
+ self.mox.ReplayAll()
+ state = self.pm.deactivate_node()
+ self.assertEqual(state, 'error')
+ self.mox.VerifyAll()
+
+ def test_reboot_node(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ self.mox.StubOutWithMock(self.pm, 'is_power_on')
+ self.pm._check_for_node().AndReturn(["testNode"])
+ self.pm._run_command(self.pm._vp_cmd.reboot_cmd).AndReturn("Restarted")
+ self.pm.is_power_on().AndReturn(True)
+ self.mox.ReplayAll()
+ state = self.pm.reboot_node()
+ self.assertEqual(state, 'active')
+ self.mox.VerifyAll()
+
+ def test_reboot_node_fail(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ self.mox.StubOutWithMock(self.pm, 'is_power_on')
+ self.pm._check_for_node().AndReturn(["testNode"])
+ self.pm._run_command(self.pm._vp_cmd.reboot_cmd).AndReturn("Restarted")
+ self.pm.is_power_on().AndReturn(False)
+ self.mox.ReplayAll()
+ state = self.pm.reboot_node()
+ self.assertEqual(state, 'error')
+ self.mox.VerifyAll()
+
+ def test_is_power_on(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ self.pm._check_for_node().AndReturn(["testNode"])
+ self.pm._run_command(self.pm._vp_cmd.list_running_cmd).\
+ AndReturn(["testNode"])
+ self.pm._matched_name = 'testNode'
+ self.mox.ReplayAll()
+ state = self.pm.is_power_on()
+ self.assertEqual(state, True)
+ self.mox.VerifyAll()
+
+ def test_is_power_on_fail(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_check_for_node')
+ self.mox.StubOutWithMock(self.pm, '_run_command')
+ self.pm._check_for_node().AndReturn(["NotFoundNode"])
+ self.pm._run_command(self.pm._vp_cmd.list_running_cmd).\
+ AndReturn(["NotFoundNode"])
+ self.pm._matched_name = 'testNode'
+ self.mox.ReplayAll()
+ state = self.pm.is_power_on()
+ self.assertEqual(state, False)
+ self.mox.VerifyAll()
+
+ def test_run_command(self):
+ self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
+ self.flags(virtual_power_host_user='user', group="baremetal")
+ self.flags(virtual_power_host_pass='password', group="baremetal")
+ self._create_node()
+ self._create_pm()
+
+ self.mox.StubOutWithMock(self.pm, '_set_connection')
+ self.mox.StubOutWithMock(nutils, 'ssh_execute')
+ self.pm._set_connection().AndReturn(True)
+ nutils.ssh_execute(None, '/usr/bin/VBoxManage test return',
+ check_exit_code=True).AndReturn(("test\nreturn", ""))
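+ # _run_command is expected to prepend the VBoxManage base command
+ # and split the ssh output into a list of lines.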
+ self.pm._matched_name = 'testNode'
+ self.mox.ReplayAll()
+ result = self.pm._run_command("test return")
+ self.assertEqual(result, ['test', 'return'])
+ self.mox.VerifyAll()
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
index ef165f4ed..1ebbc407d 100644
--- a/nova/tests/cells/test_cells_manager.py
+++ b/nova/tests/cells/test_cells_manager.py
@@ -15,15 +15,31 @@
"""
Tests For CellsManager
"""
+import copy
import datetime
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
+from nova.openstack.common import cfg
+from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
+CONF = cfg.CONF
+CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
+
+
+FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)]
+FAKE_SERVICES = [dict(id=1, host='host1',
+ compute_node=[FAKE_COMPUTE_NODES[0]]),
+ dict(id=2, host='host2',
+ compute_node=[FAKE_COMPUTE_NODES[1]]),
+ dict(id=3, host='host3', compute_node=[])]
+FAKE_TASK_LOGS = [dict(id=1, host='host1'),
+ dict(id=2, host='host2')]
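+# Shared fixtures for the tests below: two compute nodes, three
+# services (one with no compute node) and two task log entries.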
+
class CellsManagerClassTestCase(test.TestCase):
"""Test case for CellsManager class."""
@@ -38,13 +54,13 @@ class CellsManagerClassTestCase(test.TestCase):
self.driver = self.cells_manager.driver
self.ctxt = 'fake_context'
- def _get_fake_responses(self):
- responses = []
- expected_responses = []
- for x in xrange(1, 4):
- responses.append(messaging.Response('cell%s' % x, x, False))
- expected_responses.append(('cell%s' % x, x))
- return expected_responses, responses
+ def _get_fake_response(self, raw_response=None, exc=False):
+ if exc:
+ return messaging.Response('fake', test.TestingException(),
+ True)
+ if raw_response is None:
+ raw_response = 'fake-response'
+ return messaging.Response('fake', raw_response, False)
def test_get_cell_info_for_neighbors(self):
self.mox.StubOutWithMock(self.cells_manager.state_manager,
@@ -109,17 +125,13 @@ class CellsManagerClassTestCase(test.TestCase):
cell_name = 'fake-cell-name'
method_info = 'fake-method-info'
- fake_response = messaging.Response('fake', 'fake', False)
-
self.mox.StubOutWithMock(self.msg_runner,
'run_compute_api_method')
- self.mox.StubOutWithMock(fake_response,
- 'value_or_raise')
+ fake_response = self._get_fake_response()
self.msg_runner.run_compute_api_method(self.ctxt,
cell_name,
method_info,
True).AndReturn(fake_response)
- fake_response.value_or_raise().AndReturn('fake-response')
self.mox.ReplayAll()
response = self.cells_manager.run_compute_api_method(
self.ctxt, cell_name=cell_name, method_info=method_info,
@@ -237,3 +249,182 @@ class CellsManagerClassTestCase(test.TestCase):
project_id='fake-project',
updated_since='fake-time',
deleted='fake-deleted')
+
+ def test_service_get_all(self):
+ responses = []
+ expected_response = []
+ # 3 cells... so 3 responses. Each response is a list of services.
+ # Manager should turn these into a single list of responses.
+ for i in xrange(3):
+ cell_name = 'path!to!cell%i' % i
+ services = []
+ for service in FAKE_SERVICES:
+ services.append(copy.deepcopy(service))
+ expected_service = copy.deepcopy(service)
+ cells_utils.add_cell_to_service(expected_service, cell_name)
+ expected_response.append(expected_service)
+ response = messaging.Response(cell_name, services, False)
+ responses.append(response)
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'service_get_all')
+ self.msg_runner.service_get_all(self.ctxt,
+ 'fake-filters').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.service_get_all(self.ctxt,
+ filters='fake-filters')
+ self.assertEqual(expected_response, response)
+
+ def test_service_get_by_compute_host(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'service_get_by_compute_host')
+ fake_cell = 'fake-cell'
+ fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0],
+ False)
+ expected_response = copy.deepcopy(FAKE_SERVICES[0])
+ cells_utils.add_cell_to_service(expected_response, fake_cell)
+
+ cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+ self.msg_runner.service_get_by_compute_host(self.ctxt,
+ fake_cell, 'fake-host').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.service_get_by_compute_host(self.ctxt,
+ host_name=cell_and_host)
+ self.assertEqual(expected_response, response)
+
+ def test_proxy_rpc_to_manager(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'proxy_rpc_to_manager')
+ fake_response = self._get_fake_response()
+ cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+ topic = rpc.queue_get_for(self.ctxt, CONF.compute_topic,
+ cell_and_host)
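+ # The manager is expected to split the cell and host back out of
+ # the proxy topic before routing the message.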
+ self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
+ 'fake-host', topic, 'fake-rpc-msg',
+ True, -1).AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
+ topic=topic, rpc_message='fake-rpc-msg', call=True,
+ timeout=-1)
+ self.assertEqual('fake-response', response)
+
+ def _build_task_log_responses(self, num):
+ responses = []
+ expected_response = []
+ # One response per cell. Each response is a list of task log
+ # entries. Manager should turn these into a single list of
+ # task log entries.
+ for i in xrange(num):
+ cell_name = 'path!to!cell%i' % i
+ task_logs = []
+ for task_log in FAKE_TASK_LOGS:
+ task_logs.append(copy.deepcopy(task_log))
+ expected_task_log = copy.deepcopy(task_log)
+ cells_utils.add_cell_to_task_log(expected_task_log,
+ cell_name)
+ expected_response.append(expected_task_log)
+ response = messaging.Response(cell_name, task_logs, False)
+ responses.append(response)
+ return expected_response, responses
+
+ def test_task_log_get_all(self):
+ expected_response, responses = self._build_task_log_responses(3)
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, None,
+ 'fake-name', 'fake-begin',
+ 'fake-end', host=None, state=None).AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end')
+ self.assertEqual(expected_response, response)
+
+ def test_task_log_get_all_with_filters(self):
+ expected_response, responses = self._build_task_log_responses(1)
+ cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
+ 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end',
+ host=cell_and_host, state='fake-state')
+ self.assertEqual(expected_response, response)
+
+ def test_task_log_get_all_with_cell_but_no_host_filters(self):
+ expected_response, responses = self._build_task_log_responses(1)
+ # Host filter only has cell name.
+ cell_and_host = 'fake-cell'
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
+ 'fake-name', 'fake-begin', 'fake-end', host=None,
+ state='fake-state').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end',
+ host=cell_and_host, state='fake-state')
+ self.assertEqual(expected_response, response)
+
+ def test_compute_node_get_all(self):
+ responses = []
+ expected_response = []
+ # 3 cells... so 3 responses. Each response is a list of computes.
+ # Manager should turn these into a single list of responses.
+ for i in xrange(3):
+ cell_name = 'path!to!cell%i' % i
+ compute_nodes = []
+ for compute_node in FAKE_COMPUTE_NODES:
+ compute_nodes.append(copy.deepcopy(compute_node))
+ expected_compute_node = copy.deepcopy(compute_node)
+ cells_utils.add_cell_to_compute_node(expected_compute_node,
+ cell_name)
+ expected_response.append(expected_compute_node)
+ response = messaging.Response(cell_name, compute_nodes, False)
+ responses.append(response)
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_get_all')
+ self.msg_runner.compute_node_get_all(self.ctxt,
+ hypervisor_match='fake-match').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_get_all(self.ctxt,
+ hypervisor_match='fake-match')
+ self.assertEqual(expected_response, response)
+
+ def test_compute_node_stats(self):
+ raw_resp1 = {'key1': 1, 'key2': 2}
+ raw_resp2 = {'key2': 1, 'key3': 2}
+ raw_resp3 = {'key3': 1, 'key4': 2}
+ responses = [messaging.Response('cell1', raw_resp1, False),
+ messaging.Response('cell2', raw_resp2, False),
+ messaging.Response('cell3', raw_resp3, False)]
+ expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
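+ # Stats from all cells should be merged by summing values per key.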
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_stats')
+ self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_stats(self.ctxt)
+ self.assertEqual(expected_resp, response)
+
+ def test_compute_node_get(self):
+ fake_cell = 'fake-cell'
+ fake_response = messaging.Response(fake_cell,
+ FAKE_COMPUTE_NODES[0],
+ False)
+ expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0])
+ cells_utils.add_cell_to_compute_node(expected_response, fake_cell)
+ cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_get')
+ self.msg_runner.compute_node_get(self.ctxt,
+ 'fake-cell', 'fake-id').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_get(self.ctxt,
+ compute_id=cell_and_id)
+ self.assertEqual(expected_response, response)
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index da45721ed..811ad17fd 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -14,13 +14,12 @@
"""
Tests For Cells Messaging module
"""
-import mox
-
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common import rpc
from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
@@ -604,7 +603,7 @@ class CellsTargetedMethodsTestCase(test.TestCase):
self.tgt_cell_name,
host_sched_kwargs)
- def test_call_compute_api_method(self):
+ def test_run_compute_api_method(self):
instance_uuid = 'fake_instance_uuid'
method_info = {'method': 'reboot',
@@ -614,8 +613,7 @@ class CellsTargetedMethodsTestCase(test.TestCase):
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
- instance_uuid).AndReturn(
- 'fake_instance')
+ instance_uuid).AndReturn('fake_instance')
self.tgt_compute_api.reboot(self.ctxt, 'fake_instance', 2, 3,
arg1='val1', arg2='val2').AndReturn('fake_result')
self.mox.ReplayAll()
@@ -628,7 +626,7 @@ class CellsTargetedMethodsTestCase(test.TestCase):
result = response.value_or_raise()
self.assertEqual('fake_result', result)
- def test_call_compute_api_method_unknown_instance(self):
+ def test_run_compute_api_method_unknown_instance(self):
# Unknown instance should send a broadcast up that instance
# is gone.
instance_uuid = 'fake_instance_uuid'
@@ -727,6 +725,105 @@ class CellsTargetedMethodsTestCase(test.TestCase):
self.src_msg_runner.ask_children_for_capacities(self.ctxt)
+ def test_service_get_by_compute_host(self):
+ fake_host_name = 'fake-host-name'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name).AndReturn('fake-service')
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.service_get_by_compute_host(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name)
+ result = response.value_or_raise()
+ self.assertEqual('fake-service', result)
+
+ def test_proxy_rpc_to_manager_call(self):
+ fake_topic = 'fake-topic'
+ fake_rpc_message = 'fake-rpc-message'
+ fake_host_name = 'fake-host-name'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(rpc, 'call')
+
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name)
+ rpc.call(self.ctxt, fake_topic,
+ fake_rpc_message, timeout=5).AndReturn('fake_result')
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.proxy_rpc_to_manager(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name,
+ fake_topic,
+ fake_rpc_message, True, timeout=5)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_proxy_rpc_to_manager_cast(self):
+ fake_topic = 'fake-topic'
+ fake_rpc_message = 'fake-rpc-message'
+ fake_host_name = 'fake-host-name'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(rpc, 'cast')
+
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name)
+ rpc.cast(self.ctxt, fake_topic, fake_rpc_message)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.proxy_rpc_to_manager(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name,
+ fake_topic,
+ fake_rpc_message, False, timeout=None)
+
+ def test_task_log_get_all_targeted(self):
+ task_name = 'fake_task_name'
+ begin = 'fake_begin'
+ end = 'fake_end'
+ host = 'fake_host'
+ state = 'fake_state'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
+ self.tgt_db_inst.task_log_get_all(self.ctxt, task_name,
+ begin, end, host=host,
+ state=state).AndReturn(['fake_result'])
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.task_log_get_all(self.ctxt,
+ self.tgt_cell_name, task_name, begin, end, host=host,
+ state=state)
+ self.assertIsInstance(response, list)
+ self.assertEqual(1, len(response))
+ result = response[0].value_or_raise()
+ self.assertEqual(['fake_result'], result)
+
+ def test_compute_node_get(self):
+ compute_id = 'fake-id'
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get')
+ self.tgt_db_inst.compute_node_get(self.ctxt,
+ compute_id).AndReturn('fake_result')
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.compute_node_get(self.ctxt,
+ self.tgt_cell_name, compute_id)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
class CellsBroadcastMethodsTestCase(test.TestCase):
"""Test case for _BroadcastMessageMethods class. Most of these
@@ -756,6 +853,13 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
self.src_db_inst = methods_cls.db
self.src_compute_api = methods_cls.compute_api
+ if not up:
+ # fudge things so we only have 1 child to broadcast to
+ state_manager = self.src_msg_runner.state_manager
+ for cell in state_manager.get_child_cells():
+ if cell.name != 'child-cell2':
+ del state_manager.child_cells[cell.name]
+
self.mid_msg_runner = fakes.get_message_runner(mid_cell)
methods_cls = self.mid_msg_runner.methods_by_type['broadcast']
self.mid_methods_cls = methods_cls
@@ -958,3 +1062,177 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
self.src_msg_runner.sync_instances(self.ctxt,
project_id, updated_since_raw, deleted)
+
+ def test_service_get_all_with_disabled(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
+
+ self.src_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([1, 2])
+ self.mid_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([3])
+ self.tgt_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.service_get_all(ctxt,
+ filters={})
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
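+ # Responses are collected bottom-up: grandchild cell first,
+ # api-cell last.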
+ self.assertEqual(expected, response_values)
+
+ def test_service_get_all_without_disabled(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ disabled = False
+ filters = {'disabled': disabled}
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
+
+ self.src_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([1, 2])
+ self.mid_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([3])
+ self.tgt_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.service_get_all(ctxt,
+ filters=filters)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_task_log_get_all_broadcast(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ task_name = 'fake_task_name'
+ begin = 'fake_begin'
+ end = 'fake_end'
+ host = 'fake_host'
+ state = 'fake_state'
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'task_log_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'task_log_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
+
+ self.src_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([1, 2])
+ self.mid_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([3])
+ self.tgt_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.task_log_get_all(ctxt, None,
+ task_name, begin, end, host=host, state=state)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_get_all(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'compute_node_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'compute_node_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get_all')
+
+ self.src_db_inst.compute_node_get_all(ctxt).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_get_all(ctxt).AndReturn([3])
+ self.tgt_db_inst.compute_node_get_all(ctxt).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_get_all(ctxt)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_get_all_with_hyp_match(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ hypervisor_match = 'meow'
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'compute_node_search_by_hypervisor')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'compute_node_search_by_hypervisor')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'compute_node_search_by_hypervisor')
+
+ self.src_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([3])
+ self.tgt_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_get_all(ctxt,
+ hypervisor_match=hypervisor_match)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_stats(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'compute_node_statistics')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'compute_node_statistics')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'compute_node_statistics')
+
+ self.src_db_inst.compute_node_statistics(ctxt).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_statistics(ctxt).AndReturn([3])
+ self.tgt_db_inst.compute_node_statistics(ctxt).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_stats(ctxt)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
diff --git a/nova/tests/cells/test_cells_rpcapi.py b/nova/tests/cells/test_cells_rpcapi.py
index 5e045aca9..d19ce5b2b 100644
--- a/nova/tests/cells/test_cells_rpcapi.py
+++ b/nova/tests/cells/test_cells_rpcapi.py
@@ -224,3 +224,83 @@ class CellsAPITestCase(test.TestCase):
'deleted': True}
self._check_result(call_info, 'sync_instances', expected_args,
version='1.1')
+
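+ # The methods below were added across cells RPC API versions 1.2
+ # through 1.4, as the version= assertions document.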
+ def test_service_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ fake_filters = {'key1': 'val1', 'key2': 'val2'}
+ result = self.cells_rpcapi.service_get_all(self.fake_context,
+ filters=fake_filters)
+
+ expected_args = {'filters': fake_filters}
+ self._check_result(call_info, 'service_get_all', expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_service_get_by_compute_host(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.service_get_by_compute_host(
+ self.fake_context, host_name='fake-host-name')
+ expected_args = {'host_name': 'fake-host-name'}
+ self._check_result(call_info, 'service_get_by_compute_host',
+ expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_proxy_rpc_to_manager(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.proxy_rpc_to_manager(
+ self.fake_context, rpc_message='fake-msg',
+ topic='fake-topic', call=True, timeout=-1)
+ expected_args = {'rpc_message': 'fake-msg',
+ 'topic': 'fake-topic',
+ 'call': True,
+ 'timeout': -1}
+ self._check_result(call_info, 'proxy_rpc_to_manager',
+ expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_task_log_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.task_log_get_all(self.fake_context,
+ task_name='fake_name',
+ period_beginning='fake_begin',
+ period_ending='fake_end',
+ host='fake_host',
+ state='fake_state')
+
+ expected_args = {'task_name': 'fake_name',
+ 'period_beginning': 'fake_begin',
+ 'period_ending': 'fake_end',
+ 'host': 'fake_host',
+ 'state': 'fake_state'}
+ self._check_result(call_info, 'task_log_get_all', expected_args,
+ version='1.3')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
+ hypervisor_match='fake-match')
+
+ expected_args = {'hypervisor_match': 'fake-match'}
+ self._check_result(call_info, 'compute_node_get_all', expected_args,
+ version='1.4')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_stats(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_stats(self.fake_context)
+ expected_args = {}
+ self._check_result(call_info, 'compute_node_stats',
+ expected_args, version='1.4')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_get(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_get(self.fake_context,
+ 'fake_compute_id')
+ expected_args = {'compute_id': 'fake_compute_id'}
+ self._check_result(call_info, 'compute_node_get',
+ expected_args, version='1.4')
+ self.assertEqual(result, 'fake_response')
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index dc381d800..12bd3cf19 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -38,6 +38,7 @@ from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova.conductor import manager as conductor_manager
from nova import context
from nova import db
from nova import exception
@@ -59,6 +60,7 @@ from nova import quota
from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests.db import fakes as db_fakes
+from nova.tests import fake_instance_actions
from nova.tests import fake_network
from nova.tests.image import fake as fake_image
from nova.tests import matchers
@@ -143,8 +145,24 @@ class BaseTestCase(test.TestCase):
fake_rpcapi = FakeSchedulerAPI()
self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
fake_network.set_stub_network_methods(self.stubs)
+ fake_instance_actions.stub_out_action_events(self.stubs)
+
+ def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
+ self.assertTrue(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True)
+
+ self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+ self.stubs.Set(network_api.API, 'allocate_for_instance',
+ fake_get_nw_info)
+ self.compute_api = compute.API()
+
+ # Just to make long lines short
+ self.rt = self.compute._get_resource_tracker(NODENAME)
def tearDown(self):
+ timeutils.clear_time_override()
ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
instances = db.instance_get_all(ctxt)
@@ -158,6 +176,13 @@ class BaseTestCase(test.TestCase):
if not params:
params = {}
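+ # The compute manager now reads instance type data out of
+ # system_metadata (see the instance_type_flavorid assertions
+ # further down), so fake instances need those keys populated.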
+ def make_fake_sys_meta():
+ sys_meta = {}
+ inst_type = instance_types.get_instance_type_by_name(type_name)
+ for key in instance_types.system_metadata_instance_type_props:
+ sys_meta['instance_type_%s' % key] = inst_type[key]
+ return sys_meta
+
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
@@ -176,6 +201,7 @@ class BaseTestCase(test.TestCase):
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
+ inst['system_metadata'] = make_fake_sys_meta()
inst.update(params)
_create_service_entries(self.context.elevated(),
{'fake_zone': [inst['host']]})
@@ -212,25 +238,6 @@ class BaseTestCase(test.TestCase):
class ComputeTestCase(BaseTestCase):
- def setUp(self):
- def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
- self.assertTrue(ctxt.is_admin)
- return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
- spectacular=True)
-
- super(ComputeTestCase, self).setUp()
- self.stubs.Set(network_api.API, 'get_instance_nw_info',
- fake_get_nw_info)
- self.stubs.Set(network_api.API, 'allocate_for_instance',
- fake_get_nw_info)
- self.compute_api = compute.API()
- # Just to make long lines short
- self.rt = self.compute._get_resource_tracker(NODENAME)
-
- def tearDown(self):
- super(ComputeTestCase, self).tearDown()
- timeutils.clear_time_override()
-
def test_wrap_instance_fault(self):
inst = {"uuid": "fake_uuid"}
@@ -291,6 +298,64 @@ class ComputeTestCase(BaseTestCase):
self.assertFalse(called['fault_added'])
+ def test_wrap_instance_event(self):
+ inst = {"uuid": "fake_uuid"}
+
+ called = {'started': False,
+ 'finished': False}
+
+ def did_it_update_start(self2, context, values):
+ called['started'] = True
+
+ def did_it_update_finish(self2, context, values):
+ called['finished'] = True
+
+ self.stubs.Set(conductor_manager.ConductorManager,
+ 'action_event_start', did_it_update_start)
+
+ self.stubs.Set(conductor_manager.ConductorManager,
+ 'action_event_finish', did_it_update_finish)
+
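+ # wrap_instance_event should record start and finish action
+ # events through the conductor around the decorated call.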
+ @compute_manager.wrap_instance_event
+ def fake_event(self, context, instance):
+ pass
+
+ fake_event(self.compute, self.context, instance=inst)
+
+ self.assertTrue(called['started'])
+ self.assertTrue(called['finished'])
+
+ def test_wrap_instance_event_log_exception(self):
+ inst = {"uuid": "fake_uuid"}
+
+ called = {'started': False,
+ 'finished': False,
+ 'message': ''}
+
+ def did_it_update_start(self2, context, values):
+ called['started'] = True
+
+ def did_it_update_finish(self2, context, values):
+ called['finished'] = True
+ called['message'] = values['message']
+
+ self.stubs.Set(conductor_manager.ConductorManager,
+ 'action_event_start', did_it_update_start)
+
+ self.stubs.Set(conductor_manager.ConductorManager,
+ 'action_event_finish', did_it_update_finish)
+
+ @compute_manager.wrap_instance_event
+ def fake_event(self2, context, instance):
+ raise exception.NovaException()
+
+ self.assertRaises(exception.NovaException, fake_event,
+ self.compute, self.context, instance=inst)
+
+ self.assertTrue(called['started'])
+ self.assertTrue(called['finished'])
+ self.assertEqual('An unknown exception occurred.', called['message'])
+
def test_create_instance_with_img_ref_associates_config_drive(self):
# Make sure create associates a config drive.
@@ -1061,8 +1126,9 @@ class ComputeTestCase(BaseTestCase):
# this is called with the wrong args, so we have to hack
# around it.
reboot_call_info = {}
- expected_call_info = {'args': (updated_instance1, expected_nw_info,
- reboot_type, fake_block_dev_info),
+ expected_call_info = {'args': (econtext, updated_instance1,
+ expected_nw_info, reboot_type,
+ fake_block_dev_info),
'kwargs': {}}
def fake_reboot(*args, **kwargs):
@@ -1151,7 +1217,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state,
- expected_task_state):
+ expected_task_state,
+ expected_exception):
"""Ensure expected exception is raised if set_admin_password fails."""
def fake_sleep(_time):
@@ -1176,7 +1243,7 @@ class ComputeTestCase(BaseTestCase):
#error raised from the driver should not reveal internal information
#so a new error is raised
- self.assertRaises(exception.InstancePasswordSetFailed,
+ self.assertRaises(expected_exception,
self.compute.set_admin_password,
self.context,
instance=jsonutils.to_primitive(inst_ref))
@@ -1194,9 +1261,11 @@ class ComputeTestCase(BaseTestCase):
authorized.
"""
exc = exception.NotAuthorized(_('Internal error'))
+ expected_exception = exception.InstancePasswordSetFailed
self._do_test_set_admin_password_driver_error(exc,
vm_states.ERROR,
- None)
+ None,
+ expected_exception)
def test_set_admin_password_driver_not_implemented(self):
"""
@@ -1204,9 +1273,11 @@ class ComputeTestCase(BaseTestCase):
implemented by driver.
"""
exc = NotImplementedError()
+ expected_exception = NotImplementedError
self._do_test_set_admin_password_driver_error(exc,
vm_states.ACTIVE,
- None)
+ None,
+ expected_exception)
def test_inject_file(self):
# Ensure we can write a file to an instance.
@@ -1570,7 +1641,7 @@ class ComputeTestCase(BaseTestCase):
# Ensure failure when running an instance that already exists.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
- self.assertRaises(exception.Invalid,
+ self.assertRaises(exception.InstanceExists,
self.compute.run_instance,
self.context,
instance=instance)
@@ -1589,7 +1660,8 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
- vpn=False, macs=macs).AndReturn(
+ vpn=False, macs=macs,
+ conductor_api=self.compute.conductor_api).AndReturn(
fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
spectacular=True))
self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
@@ -1607,8 +1679,9 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
- vpn=False,
- macs=None).AndRaise(rpc_common.RemoteError())
+ vpn=False, macs=None,
+ conductor_api=self.compute.conductor_api
+ ).AndRaise(rpc_common.RemoteError())
fake_network.unset_stub_network_methods(self.stubs)
@@ -1839,6 +1912,7 @@ class ComputeTestCase(BaseTestCase):
self.context.elevated(), instance['uuid'], 'pre-migrating')
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_MIGRATED})
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
disk_info={}, image={}, instance=instance,
@@ -1868,6 +1942,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_MIGRATED})
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertRaises(test.TestingException, self.compute.finish_resize,
self.context,
migration=jsonutils.to_primitive(migration_ref),
@@ -1971,6 +2046,8 @@ class ComputeTestCase(BaseTestCase):
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
+ new_instance = db.instance_get_by_uuid(self.context,
+ new_instance['uuid'])
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
disk_info={}, image={}, instance=new_instance)
@@ -2067,7 +2144,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=new_instance)
def test_prep_resize_instance_migration_error_on_none_host(self):
- """Ensure prep_resize raise a migration error if destination host is
+ """Ensure prep_resize raises a migration error if destination host is
not defined
"""
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -2156,7 +2233,8 @@ class ComputeTestCase(BaseTestCase):
def fake_finish_revert_migration_driver(*args, **kwargs):
# Confirm the instance uses the old type in finish_revert_resize
inst = args[0]
- self.assertEqual(inst['instance_type']['flavorid'], '1')
+ sys_meta = utils.metadata_to_dict(inst['system_metadata'])
+ self.assertEqual(sys_meta['instance_type_flavorid'], '1')
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'finish_revert_migration',
@@ -2190,6 +2268,8 @@ class ComputeTestCase(BaseTestCase):
self.context.elevated(),
inst_ref['uuid'], 'pre-migrating')
+ # NOTE(danms): make sure to refresh our inst_ref after prep_resize
+ inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
instance = jsonutils.to_primitive(inst_ref)
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESIZE_PREP})
@@ -2223,6 +2303,7 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(network_api.API, 'setup_networks_on_host',
fake_setup_networks_on_host)
+ rpcinst = db.instance_get_by_uuid(self.context, rpcinst['uuid'])
self.compute.finish_revert_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
instance=rpcinst, reservations=reservations)
@@ -2532,7 +2613,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(c, inst_uuid)
def test_post_live_migration_no_shared_storage_working_correctly(self):
- """Confirm post_live_migration() works as expected correctly
+ """Confirm post_live_migration() works correctly as expected
for non shared storage migration.
"""
# Create stubs
@@ -2559,10 +2640,11 @@ class ComputeTestCase(BaseTestCase):
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_start')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'network_migrate_instance_start')
migration = {'source_compute': srchost, 'dest_compute': dest, }
- self.compute.network_api.migrate_instance_start(c, inst_ref, migration)
+ self.compute.conductor_api.network_migrate_instance_start(c, inst_ref,
+ migration)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, rpc.queue_get_for(c, CONF.compute_topic, dest),
{"method": "post_live_migration_at_destination",
@@ -2603,11 +2685,12 @@ class ComputeTestCase(BaseTestCase):
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(inst_ref, [])
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_start')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'network_migrate_instance_start')
migration = {'source_compute': srchost,
'dest_compute': dest, }
- self.compute.network_api.migrate_instance_start(c, inst_ref, migration)
+ self.compute.conductor_api.network_migrate_instance_start(c, inst_ref,
+ migration)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, rpc.queue_get_for(c, CONF.compute_topic, dest),
{"method": "post_live_migration_at_destination",
@@ -2629,8 +2712,8 @@ class ComputeTestCase(BaseTestCase):
def test_post_live_migration_at_destination(self):
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'network_migrate_instance_finish')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute, '_instance_update')
@@ -2645,8 +2728,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.host)
migration = {'source_compute': instance['host'],
'dest_compute': self.compute.host, }
- self.compute.network_api.migrate_instance_finish(admin_ctxt,
- instance, migration)
+ self.compute.conductor_api.network_migrate_instance_finish(admin_ctxt,
+ instance, migration)
fake_net_info = []
fake_block_dev_info = {'foo': 'bar'}
self.compute.driver.post_live_migration_at_destination(admin_ctxt,
@@ -2695,7 +2778,7 @@ class ComputeTestCase(BaseTestCase):
instances = db.instance_get_all(self.context)
LOG.info(_("After force-killing instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.assertEqual(task_states.POWERING_OFF, instances[0]['task_state'])
+ self.assertEqual(instances[0]['task_state'], None)
def test_add_instance_fault(self):
instance = self._create_fake_instance()
@@ -2722,8 +2805,11 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt, instance,
- NotImplementedError('test'), exc_info)
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ self.compute.conductor_api,
+ instance,
+ NotImplementedError('test'),
+ exc_info)
def test_add_instance_fault_with_remote_error(self):
instance = self._create_fake_instance()
@@ -2751,8 +2837,8 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt, instance, exc,
- exc_info)
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ self.compute.conductor_api, instance, exc, exc_info)
def test_add_instance_fault_user_error(self):
instance = self._create_fake_instance()
@@ -2779,8 +2865,8 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt, instance, user_exc,
- exc_info)
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ self.compute.conductor_api, instance, user_exc, exc_info)
def test_add_instance_fault_no_exc_info(self):
instance = self._create_fake_instance()
@@ -2798,8 +2884,10 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt, instance,
- NotImplementedError('test'))
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ self.compute.conductor_api,
+ instance,
+ NotImplementedError('test'))
def test_cleanup_running_deleted_instances(self):
admin_context = context.get_admin_context()
@@ -2872,16 +2960,12 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
- self.mox.StubOutWithMock(fake_nw_info, 'json')
self.mox.StubOutWithMock(self.compute.conductor_api,
'instance_info_cache_update')
self.compute.network_api.get_instance_nw_info(self.context,
- fake_instance, update_cache=False).AndReturn(fake_nw_info)
- fake_nw_info.json().AndReturn('fake-nw-info')
- expected_cache = {'network_info': 'fake-nw-info'}
- self.compute.conductor_api.instance_info_cache_update(self.context,
- fake_instance, expected_cache)
+ fake_instance, conductor_api=self.compute.conductor_api
+ ).AndReturn(fake_nw_info)
self.mox.ReplayAll()
@@ -3392,254 +3476,25 @@ class ComputeTestCase(BaseTestCase):
result = self.compute._get_instances_on_driver(fake_context)
self.assertEqual(driver_instances, result)
- def test_rebuild_on_host_updated_target(self):
- """Confirm evacuate scenario updates host."""
-
- # creating testdata
- c = self.context.elevated()
-
- inst_ref = self._create_fake_instance({'host': 'someotherhost'})
- db.instance_update(self.context, inst_ref['uuid'],
- {"task_state": task_states.REBUILDING})
- inst_id = inst_ref["id"]
- inst_uuid = inst_ref["uuid"]
- dest = self.compute.host
-
- def set_shared_storage(instance):
- return True
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- set_shared_storage)
-
- self.compute.rebuild_instance(c, instance=inst_ref,
- injected_files=None, image_ref=None,
- orig_image_ref=None, new_pass=None,
- orig_sys_metadata=None, bdms=[],
- recreate=True, on_shared_storage=True)
+ def test_instance_usage_audit(self):
+ instances = [{'uuid': 'foo'}]
+ self.flags(instance_usage_audit=True)
+ self.stubs.Set(compute_utils, 'has_audit_been_run',
+ lambda *a, **k: False)
+ self.stubs.Set(self.compute.conductor_api,
+ 'instance_get_active_by_window_joined',
+ lambda *a, **k: instances)
+ self.stubs.Set(compute_utils, 'start_instance_usage_audit',
+ lambda *a, **k: None)
+ self.stubs.Set(compute_utils, 'finish_instance_usage_audit',
+ lambda *a, **k: None)
- # make sure instance is updated with destination hostname.
- instance = db.instance_get(c, inst_id)
- self.assertTrue(instance['host'])
- self.assertEqual(instance['host'], dest)
-
- # cleanup
- db.instance_destroy(c, inst_uuid)
-
- def test_rebuild_with_wrong_shared_storage(self):
- """Confirm evacuate scenario updates host."""
-
- # creating testdata
- c = self.context.elevated()
-
- inst_ref = self._create_fake_instance({'host': 'srchost'})
- db.instance_update(self.context, inst_ref['uuid'],
- {"task_state": task_states.REBUILDING})
- inst_id = inst_ref["id"]
- inst_uuid = inst_ref["uuid"]
- dest = self.compute.host
-
- def set_shared_storage(instance):
- return True
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- set_shared_storage)
-
- self.assertRaises(exception.Invalid,
- self.compute.rebuild_instance, c, instance=inst_ref,
- injected_files=None, image_ref=None,
- orig_image_ref=None, new_pass=None,
- orig_sys_metadata=None,
- recreate=True, on_shared_storage=False)
-
- # make sure instance was not updated with destination hostname.
- instance = db.instance_get(c, inst_id)
- self.assertTrue(instance['host'])
- self.assertEqual(instance['host'], 'srchost')
-
- # cleanup
- db.instance_destroy(c, inst_uuid)
-
- def test_rebuild_on_host_with_volumes(self):
- """Confirm evacuate scenario reconnects volumes."""
-
- # creating testdata
- inst_ref = jsonutils.to_primitive(self._create_fake_instance
- ({'host': 'fake_host_2'}))
- db.instance_update(self.context, inst_ref['uuid'],
- {"task_state": task_states.REBUILDING})
-
- inst_id = inst_ref["id"]
- inst_uuid = inst_ref["uuid"]
-
- volume_id = 'fake'
- values = {'instance_uuid': inst_ref['uuid'],
- 'device_name': '/dev/vdc',
- 'delete_on_termination': False,
- 'volume_id': volume_id,
- }
-
- admin = context.get_admin_context()
- db.block_device_mapping_create(admin, values)
-
- def set_shared_storage(instance):
- return True
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- set_shared_storage)
-
- def fake_volume_get(self, context, volume):
- return {'id': volume_id}
- self.stubs.Set(cinder.API, "get", fake_volume_get)
-
- # Stub out and record whether it gets detached
- result = {"detached": False}
-
- def fake_detach(self, context, volume):
- result["detached"] = volume["id"] == volume_id
- self.stubs.Set(cinder.API, "detach", fake_detach)
-
- def fake_terminate_connection(self, context, volume, connector):
- return {}
- self.stubs.Set(cinder.API, "terminate_connection",
- fake_terminate_connection)
-
- # make sure volumes attach, detach are called
- self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
- self.compute.volume_api.detach(mox.IsA(admin), mox.IgnoreArg())
-
- self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping')
- self.compute._setup_block_device_mapping(mox.IsA(admin),
- mox.IsA(inst_ref),
- mox.IgnoreArg())
-
- # start test
- self.mox.ReplayAll()
-
- self.compute.rebuild_instance(admin, instance=inst_ref,
- injected_files=None, image_ref=None,
- orig_image_ref=None, new_pass=None,
- orig_sys_metadata=None, bdms=[],
- recreate=True, on_shared_storage=True)
-
- # cleanup
- for bdms in db.block_device_mapping_get_all_by_instance(
- admin, inst_uuid):
- db.block_device_mapping_destroy(admin, bdms['id'])
- db.instance_destroy(admin, inst_uuid)
-
- def test_rebuild_on_host_with_shared_storage(self):
- """Confirm evacuate scenario on shared storage."""
-
- # creating testdata
- c = self.context.elevated()
-
- inst_ref = jsonutils.to_primitive(self._create_fake_instance
- ({'host': 'fake_host_2'}))
-
- inst_uuid = inst_ref["uuid"]
- dest = self.compute.host
-
- def set_shared_storage(instance):
- return True
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- set_shared_storage)
-
- self.mox.StubOutWithMock(self.compute.driver,
- 'spawn')
- self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref), {},
- mox.IgnoreArg(), None,
- mox.IgnoreArg(), mox.IgnoreArg())
-
- # start test
- self.mox.ReplayAll()
- db.instance_update(self.context, inst_ref['uuid'],
- {"task_state": task_states.REBUILDING})
-
- self.compute.rebuild_instance(c, instance=inst_ref,
- injected_files=None, image_ref=None,
- orig_image_ref=None, new_pass=None,
- orig_sys_metadata=None, bdms=[],
- recreate=True, on_shared_storage=True)
-
- # cleanup
- db.instance_destroy(c, inst_uuid)
-
- def test_rebuild_on_host_without_shared_storage(self):
- """Confirm evacuate scenario without shared storage
- (rebuild from image)"""
-
- # creating testdata
- c = self.context.elevated()
-
- inst_ref = jsonutils.to_primitive(self._create_fake_instance
- ({'host': 'fake_host_2'}))
-
- inst_uuid = inst_ref["uuid"]
- dest = self.compute.host
-
- fake_image = {
- 'id': 1,
- 'name': 'fake_name',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id'},
- }
-
- def set_shared_storage(instance):
- return False
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- set_shared_storage)
-
- self.mox.StubOutWithMock(self.compute.driver,
- 'spawn')
- self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref),
- mox.IsA(fake_image), mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg())
-
- # start test
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'notify_usage_exists')
+ self.compute.conductor_api.notify_usage_exists(
+ self.context, instances[0], ignore_missing_network_data=False)
self.mox.ReplayAll()
-
- db.instance_update(self.context, inst_ref['uuid'],
- {"task_state": task_states.REBUILDING})
-
- self.compute.rebuild_instance(c, instance=inst_ref,
- injected_files=None, image_ref=None,
- orig_image_ref=None, new_pass='newpass',
- orig_sys_metadata=None, bdms=[],
- recreate=True, on_shared_storage=False)
-
- # cleanup
- db.instance_destroy(c, inst_uuid)
-
- def test_rebuild_on_host_instance_exists(self):
- """Rebuild if instance exists raise an exception."""
-
- # creating testdata
- c = self.context.elevated()
- inst_ref = self._create_fake_instance({'host': 'fake_host_2'})
- dest = self.compute.host
-
- instance = jsonutils.to_primitive(self._create_fake_instance())
- instance_uuid = instance['uuid']
- dest = self.compute.host
-
- self.compute.run_instance(self.context, instance=instance)
-
- db.instance_update(self.context, inst_ref['uuid'],
- {"task_state": task_states.REBUILDING})
-
- self.assertRaises(exception.Invalid,
- self.compute.rebuild_instance, c, instance=inst_ref,
- injected_files=None, image_ref=None,
- orig_image_ref=None, new_pass=None,
- orig_sys_metadata=None,
- recreate=True, on_shared_storage=True)
-
- # cleanup
- db.instance_destroy(c, inst_ref['uuid'])
- self.compute.terminate_instance(self.context, instance=instance)
+ self.compute._instance_usage_audit(self.context)
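The rewritten audit test mixes two stubbing styles: stubs.Set with lambdas for collaborators whose return values only steer control flow, and mox record/replay for the one call whose invocation is the actual assertion. A minimal standalone sketch of the mox half, assuming a hypothetical Notifier class:

    import mox

    class Notifier(object):
        def notify_usage_exists(self, ctxt, instance):
            raise AssertionError('the real notifier must not run in tests')

    m = mox.Mox()
    notifier = Notifier()
    # Record the expected call; VerifyAll() fails if it never happens.
    m.StubOutWithMock(notifier, 'notify_usage_exists')
    notifier.notify_usage_exists('ctxt', {'uuid': 'foo'})
    m.ReplayAll()

    notifier.notify_usage_exists('ctxt', {'uuid': 'foo'})  # code under test
    m.VerifyAll()
    m.UnsetStubs()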
class ComputeAPITestCase(BaseTestCase):
@@ -3780,6 +3635,28 @@ class ComputeAPITestCase(BaseTestCase):
finally:
db.instance_destroy(self.context, ref[0]['uuid'])
+ def test_create_saves_type_in_system_metadata(self):
+ instance_type = instance_types.get_default_instance_type()
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=instance_type,
+ image_href=None)
+ try:
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ ref[0]['uuid'])
+
+ instance_type_props = ['name', 'memory_mb', 'vcpus', 'root_gb',
+ 'ephemeral_gb', 'flavorid', 'swap',
+ 'rxtx_factor', 'vcpu_weight']
+ for key in instance_type_props:
+ sys_meta_key = "instance_type_%s" % key
+ self.assertTrue(sys_meta_key in sys_metadata)
+ self.assertEqual(str(instance_type[key]),
+ str(sys_metadata[sys_meta_key]))
+
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
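The assertion loop above implies the flattening contract: every flavor property lands in system metadata as a stringified value under an instance_type_ prefix. A sketch of that mapping (the helper name is hypothetical):

    def flavor_to_system_metadata(instance_type):
        # Hypothetical helper: prefix and stringify each flavor property
        # the way the test expects to find it in system metadata.
        props = ['name', 'memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb',
                 'flavorid', 'swap', 'rxtx_factor', 'vcpu_weight']
        return dict(('instance_type_%s' % key, str(instance_type[key]))
                    for key in props)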
def test_create_instance_associates_security_groups(self):
# Make sure create associates security groups.
group = self._create_group()
@@ -3929,6 +3806,15 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_start_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.start,
+ self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_stop(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
@@ -3944,6 +3830,15 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_stop_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.stop,
+ self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_start_shutdown(self):
def check_state(instance_uuid, power_state_, vm_state_, task_state_):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
@@ -5782,6 +5677,15 @@ class ComputeAPITestCase(BaseTestCase):
fake_instance, tail_length=fake_tail_length)
self.assertEqual(output, fake_console_output)
+ def test_console_output_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_console_output,
+ self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_attach_volume(self):
# Ensure a volume can be attached to an instance.
@@ -5838,6 +5742,88 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
fake_rpc_attach_volume)
+ def test_detach_volume(self):
+ # Ensure a volume can be detached from an instance.
+
+ called = {}
+ instance = self._create_fake_instance()
+
+ def fake_check_detach(*args, **kwargs):
+ called['fake_check_detach'] = True
+
+ def fake_begin_detaching(*args, **kwargs):
+ called['fake_begin_detaching'] = True
+
+ def fake_volume_get(self, context, volume_id):
+ called['fake_volume_get'] = True
+ return {'id': volume_id, 'attach_status': 'in-use',
+ 'instance_uuid': instance['uuid']}
+
+ def fake_rpc_detach_volume(self, context, **kwargs):
+ called['fake_rpc_detach_volume'] = True
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_detach', fake_check_detach)
+ self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching)
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume',
+ fake_rpc_detach_volume)
+
+ self.compute_api.detach_volume(self.context, 1)
+ self.assertTrue(called.get('fake_volume_get'))
+ self.assertTrue(called.get('fake_check_detach'))
+ self.assertTrue(called.get('fake_begin_detaching'))
+ self.assertTrue(called.get('fake_rpc_detach_volume'))
+
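The four flags pin down an ordering on the API side: look the volume up, validate the detach, mark it detaching in Cinder, then hand off to the compute host over RPC. A hypothetical sketch of that sequence, not the actual API code:

    def detach_volume_api_side(volume_api, rpcapi, context, volume_id):
        # Hypothetical sketch of the call order the stubs record.
        volume = volume_api.get(context, volume_id)
        volume_api.check_detach(context, volume)
        volume_api.begin_detaching(context, volume)
        instance = {'uuid': volume['instance_uuid']}
        rpcapi.detach_volume(context, instance=instance,
                             volume_id=volume['id'])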
+ def test_detach_invalid_volume(self):
+ # Ensure an exception is raised when detaching an unattached volume.
+
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id, 'attach_status': 'detached'}
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api.detach_volume, self.context, 1)
+
+ def test_detach_volume_libvirt_is_down(self):
+ # Ensure rollback during detach if libvirt goes down
+
+ called = {}
+ instance = self._create_fake_instance()
+
+ def fake_get_instance_volume_bdm(*args, **kwargs):
+ return {'device_name': '/dev/vdb', 'volume_id': 1,
+ 'connection_info': '{"test": "test"}'}
+
+ def fake_libvirt_driver_instance_exists(*args, **kwargs):
+ called['fake_libvirt_driver_instance_exists'] = True
+ return False
+
+ def fake_libvirt_driver_detach_volume_fails(*args, **kwargs):
+ called['fake_libvirt_driver_detach_volume_fails'] = True
+ raise AttributeError
+
+ def fake_roll_detaching(*args, **kwargs):
+ called['fake_roll_detaching'] = True
+
+ def fake_volume_get(self, context, volume_id):
+ called['fake_volume_get'] = True
+ return {'id': volume_id, 'attach_status': 'in-use'}
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
+ self.stubs.Set(self.compute, "_get_instance_volume_bdm",
+ fake_get_instance_volume_bdm)
+ self.stubs.Set(self.compute.driver, "instance_exists",
+ fake_libvirt_driver_instance_exists)
+ self.stubs.Set(self.compute.driver, "detach_volume",
+ fake_libvirt_driver_detach_volume_fails)
+
+ self.assertRaises(AttributeError, self.compute.detach_volume,
+ self.context, 1, instance)
+ self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
+ self.assertTrue(called.get('fake_volume_get'))
+ self.assertTrue(called.get('fake_roll_detaching'))
+
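The test pins down a rollback contract: if the driver's detach raises after the domain is already gone, the volume must be returned to its previous Cinder state via roll_detaching before the exception propagates. A sketch of that control flow, assuming hypothetical names rather than the actual manager code:

    def detach_with_rollback(driver, volume_api, context, volume_id,
                             instance):
        # Sketch of the contract the test exercises (names hypothetical).
        try:
            driver.detach_volume(instance, volume_id)
        except Exception:
            # Put the volume back to 'in-use' in Cinder, then re-raise
            # so the caller still sees the failure.
            volume_api.roll_detaching(context, volume_id)
            raise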
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
admin = context.get_admin_context()
@@ -6074,6 +6060,107 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_evacuate(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance_uuid = instance['uuid']
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], None)
+
+ def fake_service_is_up(*args, **kwargs):
+ return False
+
+ self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
+ fake_service_is_up)
+ self.compute_api.evacuate(self.context.elevated(),
+ instance,
+ host='fake_dest_host',
+ on_shared_storage=True,
+ admin_password=None)
+
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], task_states.REBUILDING)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_fail_evacuate_from_non_existing_host(self):
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['launch_time'] = '10'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
+ type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = 'x86_64'
+ inst['os_type'] = 'Linux'
+
+ instance = jsonutils.to_primitive(db.instance_create(self.context,
+ inst))
+ instance_uuid = instance['uuid']
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], None)
+
+ self.assertRaises(exception.ComputeHostNotFound,
+ self.compute_api.evacuate, self.context.elevated(), instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_fail_evacuate_from_running_host(self):
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance_uuid = instance['uuid']
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], None)
+
+ def fake_service_is_up(*args, **kwargs):
+ return True
+
+ self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
+ fake_service_is_up)
+
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.compute_api.evacuate, self.context.elevated(), instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_fail_evacuate_instance_in_wrong_state(self):
+ instances = [
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': vm_states.BUILDING})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': vm_states.PAUSED})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': vm_states.SUSPENDED})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': vm_states.RESCUED})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': vm_states.RESIZED})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': vm_states.SOFT_DELETED})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': vm_states.DELETED})),
+ jsonutils.to_primitive(self._create_fake_instance(
+ {'vm_state': vm_states.ERROR}))
+ ]
+
+ for instance in instances:
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.evacuate, self.context, instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+ db.instance_destroy(self.context, instance['uuid'])
+
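Taken together, the three failure tests imply evacuate's preconditions: the source host must exist, its compute service must be down, and the instance must be in an evacuable vm_state (only ACTIVE is exercised as the success case). A hypothetical guard sketch mirroring them:

    class ComputeHostNotFound(Exception):
        pass

    class ComputeServiceUnavailable(Exception):
        pass

    class InstanceInvalidState(Exception):
        pass

    def check_can_evacuate(instance, host_exists, service_is_up):
        # Hypothetical sketch; the tests only establish ACTIVE as allowed.
        if not host_exists:
            raise ComputeHostNotFound()
        if service_is_up:
            raise ComputeServiceUnavailable()
        if instance['vm_state'] != 'active':
            raise InstanceInvalidState()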
def fake_rpc_method(context, topic, msg, do_cast=True):
pass
@@ -6752,6 +6839,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
+ self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance).AndRaise(InnerTestingException("Error"))
@@ -6802,6 +6890,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
except Exception:
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
+ self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance)
@@ -6830,6 +6919,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
exc_info = sys.exc_info()
compute_utils.add_instance_fault_from_exc(self.context,
+ self.compute.conductor_api,
self.instance, exc_info[0], exc_info=exc_info)
self.compute._deallocate_network(self.context,
self.instance)
@@ -6966,3 +7056,144 @@ class ComputeInactiveImageTestCase(BaseTestCase):
self.assertRaises(exception.ImageNotActive,
self.compute_api.create,
self.context, inst_type, 'fake-image-uuid')
+
+
+class EvacuateHostTestCase(BaseTestCase):
+ def setUp(self):
+ super(EvacuateHostTestCase, self).setUp()
+ self.inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+ db.instance_update(self.context, self.inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ def tearDown(self):
+ db.instance_destroy(self.context, self.inst_ref['uuid'])
+ super(EvacuateHostTestCase, self).tearDown()
+
+ def _rebuild(self, on_shared_storage=True):
+ orig_image_ref = None
+ image_ref = None
+ injected_files = None
+ self.compute.rebuild_instance(
+ self.context, self.inst_ref, orig_image_ref, image_ref,
+ injected_files, 'newpass', recreate=True,
+ on_shared_storage=on_shared_storage)
+
+ def test_rebuild_on_host_updated_target(self):
+ """Confirm evacuate scenario updates host."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # Should be on destination host
+ instance = db.instance_get(self.context, self.inst_ref['id'])
+ self.assertEqual(instance['host'], self.compute.host)
+
+ def test_rebuild_with_wrong_shared_storage(self):
+ """Confirm evacuate scenario does not update host."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.InvalidSharedStorage,
+ lambda: self._rebuild(on_shared_storage=False))
+
+ # Should remain on original host
+ instance = db.instance_get(self.context, self.inst_ref['id'])
+ self.assertEqual(instance['host'], 'fake_host_2')
+
+ def test_rebuild_on_host_with_volumes(self):
+ """Confirm evacuate scenario reconnects volumes."""
+ values = {'instance_uuid': self.inst_ref['uuid'],
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': 'fake_volume_id'}
+
+ db.block_device_mapping_create(self.context, values)
+
+ def fake_volume_get(self, context, volume):
+ return {'id': 'fake_volume_id'}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume):
+ result["detached"] = volume["id"] == 'fake_volume_id'
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ # Make sure volume detach and block device mapping setup are called
+ self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
+ self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping')
+ self.compute._setup_block_device_mapping(mox.IsA(self.context),
+ mox.IsA(self.inst_ref),
+ mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # cleanup
+ for bdms in db.block_device_mapping_get_all_by_instance(
+ self.context, self.inst_ref['uuid']):
+ db.block_device_mapping_destroy(self.context, bdms['id'])
+
+ def test_rebuild_on_host_with_shared_storage(self):
+ """Confirm evacuate scenario on shared storage."""
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.compute.driver.spawn(mox.IsA(self.context),
+ mox.IsA(self.inst_ref), {}, mox.IgnoreArg(), 'newpass',
+ network_info=mox.IgnoreArg(),
+ block_device_info=mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ def test_rebuild_on_host_without_shared_storage(self):
+ """Confirm evacuate scenario without shared storage
+ (rebuild from image)
+ """
+ fake_image = {'id': 1,
+ 'name': 'fake_name',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'}}
+
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.compute.driver.spawn(mox.IsA(self.context),
+ mox.IsA(self.inst_ref), mox.IsA(fake_image), mox.IgnoreArg(),
+ mox.IsA('newpass'), network_info=mox.IgnoreArg(),
+ block_device_info=mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ lambda x: False)
+ self.mox.ReplayAll()
+
+ self._rebuild(on_shared_storage=False)
+
+ def test_rebuild_on_host_instance_exists(self):
+ """Rebuild if instance exists raises an exception."""
+ db.instance_update(self.context, self.inst_ref['uuid'],
+ {"task_state": task_states.SCHEDULING})
+ self.compute.run_instance(self.context, instance=self.inst_ref)
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.assertRaises(exception.InstanceExists,
+ lambda: self._rebuild(on_shared_storage=True))
+
+ def test_driver_doesnt_support_recreate(self):
+ with utils.temporary_mutation(self.compute.driver.capabilities,
+ supports_recreate=False):
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ lambda x: True)
+ self.assertRaises(exception.InstanceRecreateNotSupported,
+ lambda: self._rebuild(on_shared_storage=True))
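test_driver_doesnt_support_recreate relies on utils.temporary_mutation to flip the driver's capabilities dict only for the duration of the with-block. A rough sketch of that helper's contract, assuming a plain dict:

    import contextlib

    @contextlib.contextmanager
    def temporary_mutation(obj, **kwargs):
        # Swap values in for the duration of the block, then restore
        # the originals even if the body raises.
        old = dict((key, obj[key]) for key in kwargs)
        obj.update(kwargs)
        try:
            yield
        finally:
            obj.update(old)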
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
index 3c25f9b43..8ba35e033 100644
--- a/nova/tests/compute/test_compute_cells.py
+++ b/nova/tests/compute/test_compute_cells.py
@@ -164,6 +164,13 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
def test_backup(self):
return super(CellsComputeAPITestCase, self).test_backup()
+ def test_detach_volume(self):
+ self.skipTest("This test is failing due to TypeError: "
+ "detach_volume() takes exactly 3 arguments (4 given).")
+
+ def test_evacuate(self):
+ self.skipTest("Test is incompatible with cells.")
+
class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
def setUp(self):
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index 4372039e0..9b865014c 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -32,6 +32,7 @@ from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
from nova import test
+from nova.tests import fake_instance_actions
from nova.tests import fake_network
import nova.tests.image.fake
@@ -68,13 +69,30 @@ class ComputeValidateDeviceTestCase(test.TestCase):
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
lambda context, instance: self.data)
+ def _update_instance_type(self, instance_type_info):
+ self.instance_type = {
+ 'id': 1,
+ 'name': 'foo',
+ 'memory_mb': 128,
+ 'vcpus': 1,
+ 'root_gb': 10,
+ 'ephemeral_gb': 10,
+ 'flavorid': 1,
+ 'swap': 0,
+ 'rxtx_factor': 1.0,
+ 'vcpu_weight': 1,
+ }
+ self.instance_type.update(instance_type_info)
+ self.instance['system_metadata'] = [{'key': 'instance_type_%s' % key,
+ 'value': value}
+ for key, value in
+ self.instance_type.items()]
+
def _validate_device(self, device=None):
bdms = db.block_device_mapping_get_all_by_instance(
self.context, self.instance['uuid'])
- return compute_utils.get_device_name_for_instance(self.context,
- self.instance,
- bdms,
- device)
+ return compute_utils.get_device_name_for_instance(
+ self.context, self.instance, bdms, device)
@staticmethod
def _fake_bdm(device):
@@ -148,8 +166,9 @@ class ComputeValidateDeviceTestCase(test.TestCase):
self._validate_device, '/baddata/vdc')
def test_device_in_use(self):
- self.assertRaises(exception.DevicePathInUse,
- self._validate_device, '/dev/vdb')
+ exc = self.assertRaises(exception.DevicePathInUse,
+ self._validate_device, '/dev/vda')
+ self.assertIn('/dev/vda', str(exc))
def test_swap(self):
self.instance['default_swap_device'] = "/dev/vdc"
@@ -163,40 +182,40 @@ class ComputeValidateDeviceTestCase(test.TestCase):
self.assertEqual(device, '/dev/vdc')
def test_ephemeral_xenapi(self):
- self.instance_type = {
- 'ephemeral_gb': 10,
- 'swap': 0,
- }
+ self._update_instance_type({
+ 'ephemeral_gb': 10,
+ 'swap': 0,
+ })
self.stubs.Set(instance_types, 'get_instance_type',
lambda instance_type_id, ctxt=None: self.instance_type)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdc')
def test_swap_xenapi(self):
- self.instance_type = {
- 'ephemeral_gb': 0,
- 'swap': 10,
- }
+ self._update_instance_type({
+ 'ephemeral_gb': 0,
+ 'swap': 10,
+ })
self.stubs.Set(instance_types, 'get_instance_type',
lambda instance_type_id, ctxt=None: self.instance_type)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdb')
def test_swap_and_ephemeral_xenapi(self):
- self.instance_type = {
- 'ephemeral_gb': 10,
- 'swap': 10,
- }
+ self._update_instance_type({
+ 'ephemeral_gb': 10,
+ 'swap': 10,
+ })
self.stubs.Set(instance_types, 'get_instance_type',
lambda instance_type_id, ctxt=None: self.instance_type)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdd')
def test_swap_and_one_attachment_xenapi(self):
- self.instance_type = {
- 'ephemeral_gb': 0,
- 'swap': 10,
- }
+ self._update_instance_type({
+ 'ephemeral_gb': 0,
+ 'swap': 10,
+ })
self.stubs.Set(instance_types, 'get_instance_type',
lambda instance_type_id, ctxt=None: self.instance_type)
device = self._validate_device()
@@ -236,6 +255,7 @@ class UsageInfoTestCase(test.TestCase):
self.stubs.Set(nova.tests.image.fake._FakeImageService,
'show', fake_show)
fake_network.set_stub_network_methods(self.stubs)
+ fake_instance_actions.stub_out_action_events(self.stubs)
def _create_instance(self, params={}):
"""Create a test instance."""
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
index 95d3c4926..772ae1eb1 100644
--- a/nova/tests/compute/test_host_api.py
+++ b/nova/tests/compute/test_host_api.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova.cells import utils as cells_utils
from nova import compute
from nova.compute import rpcapi as compute_rpcapi
from nova import context
@@ -84,6 +85,42 @@ class ComputeHostAPITestCase(test.TestCase):
'fake_mode')
self.assertEqual('fake-result', result)
+ def test_service_get_all_no_zones(self):
+ services = [dict(id=1, key1='val1', key2='val2', topic='compute',
+ host='host1'),
+ dict(id=2, key1='val2', key3='val3', topic='compute',
+ host='host2')]
+
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'service_get_all')
+
+ # Test no filters
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt)
+ self.mox.VerifyAll()
+ self.assertEqual(services, result)
+
+ # Test no filters #2
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt, filters={})
+ self.mox.VerifyAll()
+ self.assertEqual(services, result)
+
+ # Test w/ filter
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=dict(key1='val2'))
+ self.mox.VerifyAll()
+ self.assertEqual([services[1]], result)
+
def test_service_get_all(self):
services = [dict(id=1, key1='val1', key2='val2', topic='compute',
host='host1'),
@@ -99,28 +136,188 @@ class ComputeHostAPITestCase(test.TestCase):
'service_get_all')
# Test no filters
- self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
- services)
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt)
+ result = self.host_api.service_get_all(self.ctxt, set_zones=True)
self.mox.VerifyAll()
self.assertEqual(exp_services, result)
# Test no filters #2
self.mox.ResetAll()
- self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
- services)
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt, filters={})
+ result = self.host_api.service_get_all(self.ctxt, filters={},
+ set_zones=True)
self.mox.VerifyAll()
self.assertEqual(exp_services, result)
# Test w/ filter
self.mox.ResetAll()
- self.host_api.db.service_get_all(self.ctxt, False).AndReturn(
- services)
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
self.mox.ReplayAll()
result = self.host_api.service_get_all(self.ctxt,
- filters=dict(key1='val2'))
+ filters=dict(key1='val2'),
+ set_zones=True)
self.mox.VerifyAll()
self.assertEqual([exp_services[1]], result)
+
+ # Test w/ zone filter but no set_zones arg.
+ self.mox.ResetAll()
+ self.host_api.db.service_get_all(self.ctxt,
+ disabled=None).AndReturn(services)
+ self.mox.ReplayAll()
+ filters = {'availability_zone': 'nova'}
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=filters)
+ self.mox.VerifyAll()
+ self.assertEqual(exp_services, result)
+
+ def test_service_get_by_compute_host(self):
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'service_get_by_compute_host')
+
+ self.host_api.db.service_get_by_compute_host(self.ctxt,
+ 'fake-host').AndReturn('fake-response')
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_by_compute_host(self.ctxt,
+ 'fake-host')
+ self.assertEqual('fake-response', result)
+
+ def test_instance_get_all_by_host(self):
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'instance_get_all_by_host')
+
+ self.host_api.db.instance_get_all_by_host(self.ctxt,
+ 'fake-host').AndReturn(['fake-responses'])
+ self.mox.ReplayAll()
+ result = self.host_api.instance_get_all_by_host(self.ctxt,
+ 'fake-host')
+ self.assertEqual(['fake-responses'], result)
+
+ def test_task_log_get_all(self):
+ self.mox.StubOutWithMock(self.host_api.db, 'task_log_get_all')
+
+ self.host_api.db.task_log_get_all(self.ctxt,
+ 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state').AndReturn('fake-response')
+ self.mox.ReplayAll()
+ result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
+ 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state')
+ self.assertEqual('fake-response', result)
+
+
+class ComputeHostAPICellsTestCase(ComputeHostAPITestCase):
+ def setUp(self):
+ self.flags(compute_api_class='nova.compute.cells_api.ComputeCellsAPI')
+ super(ComputeHostAPICellsTestCase, self).setUp()
+
+ def _mock_rpc_call(self, expected_message, result=None):
+ if result is None:
+ result = 'fake-result'
+ # Wrapped with cells call
+ expected_message = {'method': 'proxy_rpc_to_manager',
+ 'args': {'topic': 'compute.fake_host',
+ 'rpc_message': expected_message,
+ 'call': True,
+ 'timeout': None},
+ 'version': '1.2'}
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(self.ctxt, 'cells', expected_message,
+ None).AndReturn(result)
+
+ def test_service_get_all_no_zones(self):
+ services = [dict(id=1, key1='val1', key2='val2', topic='compute',
+ host='host1'),
+ dict(id=2, key1='val2', key3='val3', topic='compute',
+ host='host2')]
+
+ fake_filters = {'key1': 'val1'}
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'service_get_all')
+ self.host_api.cells_rpcapi.service_get_all(self.ctxt,
+ filters=fake_filters).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=fake_filters)
+ self.assertEqual(services, result)
+
+ def test_service_get_all(self):
+ services = [dict(id=1, key1='val1', key2='val2', topic='compute',
+ host='host1'),
+ dict(id=2, key1='val2', key3='val3', topic='compute',
+ host='host2')]
+ exp_services = []
+ for service in services:
+ exp_service = {}
+ exp_service.update(availability_zone='nova', **service)
+ exp_services.append(exp_service)
+
+ fake_filters = {'key1': 'val1'}
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'service_get_all')
+ self.host_api.cells_rpcapi.service_get_all(self.ctxt,
+ filters=fake_filters).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=fake_filters,
+ set_zones=True)
+ self.mox.VerifyAll()
+ self.assertEqual(exp_services, result)
+
+ # Test w/ zone filter but no set_zones arg.
+ self.mox.ResetAll()
+ fake_filters = {'availability_zone': 'nova'}
+ # Zone filter is done client-side, so it should be stripped
+ # from this call.
+ self.host_api.cells_rpcapi.service_get_all(self.ctxt,
+ filters={}).AndReturn(services)
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_all(self.ctxt,
+ filters=fake_filters)
+ self.mox.VerifyAll()
+ self.assertEqual(exp_services, result)
+
+ def test_service_get_by_compute_host(self):
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'service_get_by_compute_host')
+
+ self.host_api.cells_rpcapi.service_get_by_compute_host(self.ctxt,
+ 'fake-host').AndReturn('fake-response')
+ self.mox.ReplayAll()
+ result = self.host_api.service_get_by_compute_host(self.ctxt,
+ 'fake-host')
+ self.assertEqual('fake-response', result)
+
+ def test_instance_get_all_by_host(self):
+ instances = [dict(id=1, cell_name='cell1', host='host1'),
+ dict(id=2, cell_name='cell2', host='host1'),
+ dict(id=3, cell_name='cell1', host='host2')]
+
+ self.mox.StubOutWithMock(self.host_api.db,
+ 'instance_get_all_by_host')
+
+ self.host_api.db.instance_get_all_by_host(self.ctxt,
+ 'fake-host').AndReturn(instances)
+ self.mox.ReplayAll()
+ expected_result = [instances[0], instances[2]]
+ cell_and_host = cells_utils.cell_with_item('cell1', 'fake-host')
+ result = self.host_api.instance_get_all_by_host(self.ctxt,
+ cell_and_host)
+ self.assertEqual(expected_result, result)
+
+ def test_task_log_get_all(self):
+ self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
+ 'task_log_get_all')
+
+ self.host_api.cells_rpcapi.task_log_get_all(self.ctxt,
+ 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state').AndReturn('fake-response')
+ self.mox.ReplayAll()
+ result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
+ 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state')
+ self.assertEqual('fake-response', result)
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 53d92a13f..f1d0d1f0c 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -19,6 +19,7 @@
import uuid
+from nova.compute import instance_types
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
@@ -152,8 +153,22 @@ class BaseTestCase(test.TestCase):
}
return service
+ def _fake_instance_system_metadata(self, instance_type, prefix=''):
+ sys_meta = []
+ for key in instance_types.system_metadata_instance_type_props.keys():
+ sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
+ 'value': instance_type[key]})
+ return sys_meta
+
def _fake_instance(self, *args, **kwargs):
+ # Default to an instance ready to resize to or from the same
+ # instance_type
+ itype = self._fake_instance_type_create()
+ sys_meta = (self._fake_instance_system_metadata(itype) +
+ self._fake_instance_system_metadata(itype, 'new_') +
+ self._fake_instance_system_metadata(itype, 'old_'))
+
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
@@ -169,6 +184,7 @@ class BaseTestCase(test.TestCase):
'node': None,
'instance_type_id': 1,
'launched_on': None,
+ 'system_metadata': sys_meta,
}
instance.update(kwargs)
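The resize-claim tests need the flavor recorded three times over, since the tracker reads the current, old and new flavors out of system metadata during a resize. A small sketch of how the prefixed entries combine, mirroring the helper above:

    def fake_sys_meta(instance_type, prefixes=('', 'new_', 'old_')):
        # One entry per (prefix, property) pair; e.g. for 'memory_mb':
        # instance_type_memory_mb, new_instance_type_memory_mb,
        # old_instance_type_memory_mb.
        return [{'key': '%sinstance_type_%s' % (prefix, key),
                 'value': value}
                for prefix in prefixes
                for key, value in instance_type.items()]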
@@ -183,6 +199,9 @@ class BaseTestCase(test.TestCase):
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': FAKE_VIRT_LOCAL_GB / 2,
'ephemeral_gb': FAKE_VIRT_LOCAL_GB / 2,
+ 'swap': 0,
+ 'rxtx_factor': 1.0,
+ 'vcpu_weight': 1,
'flavorid': 'fakeflavor'
}
instance_type.update(**kwargs)
@@ -391,7 +410,7 @@ class BaseTrackerTestCase(BaseTestCase):
if tracker is None:
tracker = self.tracker
- if not field in tracker.compute_node:
+ if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % locals())
x = tracker.compute_node[field]
@@ -714,11 +733,12 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
# make an instance of src_type:
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=0,
vcpus=1, instance_type_id=2)
-
+ instance['system_metadata'] = self._fake_instance_system_metadata(
+ dest_type)
self.tracker.instance_claim(self.context, instance, self.limits)
# resize to dest_type:
- claim = self.tracker.resize_claim(self.context, self.instance,
+ claim = self.tracker.resize_claim(self.context, instance,
dest_type, self.limits)
self._assert(3, 'memory_mb_used')
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 30d176bbd..5c9ce9e5f 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -17,6 +17,7 @@
import mox
from nova.compute import instance_types
+from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
@@ -26,9 +27,11 @@ from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
+from nova import notifications
from nova.openstack.common import jsonutils
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
+from nova import quota
from nova import test
@@ -333,16 +336,6 @@ class _BaseTestCase(object):
self.mox.ReplayAll()
self.conductor.instance_get_all_hung_in_rebooting(self.context, 123)
- def test_instance_get_active_by_window(self):
- self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
- db.instance_get_active_by_window(self.context, 'fake-begin',
- 'fake-end', 'fake-proj',
- 'fake-host')
- self.mox.ReplayAll()
- self.conductor.instance_get_active_by_window(self.context,
- 'fake-begin', 'fake-end',
- 'fake-proj', 'fake-host')
-
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
@@ -426,12 +419,131 @@ class _BaseTestCase(object):
'fake-values', False)
self.assertEqual(result, 'fake-result')
+ def test_instance_fault_create(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ db.instance_fault_create(self.context, 'fake-values').AndReturn(
+ 'fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.instance_fault_create(self.context,
+ 'fake-values')
+ self.assertEqual(result, 'fake-result')
+
+ def test_task_log_get(self):
+ self.mox.StubOutWithMock(db, 'task_log_get')
+ db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
+ 'state').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.task_log_get(self.context, 'task', 'begin',
+ 'end', 'host', 'state')
+ self.assertEqual(result, 'result')
+
+ def test_task_log_get_with_no_state(self):
+ self.mox.StubOutWithMock(db, 'task_log_get')
+ db.task_log_get(self.context, 'task', 'begin', 'end',
+ 'host', None).AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.task_log_get(self.context, 'task', 'begin',
+ 'end', 'host')
+ self.assertEqual(result, 'result')
+
+ def test_task_log_begin_task(self):
+ self.mox.StubOutWithMock(db, 'task_log_begin_task')
+ db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
+ 'end', 'host', 'items',
+ 'message').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.task_log_begin_task(
+ self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
+ self.assertEqual(result, 'result')
+
+ def test_task_log_end_task(self):
+ self.mox.StubOutWithMock(db, 'task_log_end_task')
+ db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
+ 'host', 'errors', 'message').AndReturn('result')
+ self.mox.ReplayAll()
+ result = self.conductor.task_log_end_task(
+ self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
+ self.assertEqual(result, 'result')
+
+ def test_notify_usage_exists(self):
+ info = {
+ 'audit_period_beginning': 'start',
+ 'audit_period_ending': 'end',
+ 'bandwidth': 'bw_usage',
+ 'image_meta': {},
+ 'extra': 'info',
+ }
+ instance = {
+ 'system_metadata': [],
+ }
+
+ self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
+ self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
+ self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
+
+ notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
+ notifications.bandwidth_usage(instance, 'start', True).AndReturn(
+ 'bw_usage')
+ compute_utils.notify_about_instance_usage(self.context, instance,
+ 'exists',
+ system_metadata={},
+ extra_usage_info=info)
+
+ self.mox.ReplayAll()
+
+ self.conductor.notify_usage_exists(self.context, instance,
+ system_metadata={},
+ extra_usage_info=dict(extra='info'))
+
+ def test_security_groups_trigger_members_refresh(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_members_refresh')
+ self.conductor_manager.security_group_api.trigger_members_refresh(
+ self.context, [1, 2, 3])
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_members_refresh(self.context,
+ [1, 2, 3])
+
+ def test_network_migrate_instance_start(self):
+ self.mox.StubOutWithMock(self.conductor_manager.network_api,
+ 'migrate_instance_start')
+ self.conductor_manager.network_api.migrate_instance_start(self.context,
+ 'instance',
+ 'migration')
+ self.mox.ReplayAll()
+ self.conductor.network_migrate_instance_start(self.context,
+ 'instance',
+ 'migration')
+
+ def test_network_migrate_instance_finish(self):
+ self.mox.StubOutWithMock(self.conductor_manager.network_api,
+ 'migrate_instance_finish')
+ self.conductor_manager.network_api.migrate_instance_finish(
+ self.context, 'instance', 'migration')
+ self.mox.ReplayAll()
+ self.conductor.network_migrate_instance_finish(self.context,
+ 'instance',
+ 'migration')
+
+ def test_quota_commit(self):
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ quota.QUOTAS.commit(self.context, 'reservations')
+ self.mox.ReplayAll()
+ self.conductor.quota_commit(self.context, 'reservations')
+
+ def test_quota_rollback(self):
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+ quota.QUOTAS.rollback(self.context, 'reservations')
+ self.mox.ReplayAll()
+ self.conductor.quota_rollback(self.context, 'reservations')
+
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
+ self.conductor_manager = self.conductor
self.stub_out_client_exceptions()
def test_block_device_mapping_update_or_create(self):
@@ -544,6 +656,16 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
('host', 'binary'),
dict(host='host', binary='binary'))
+ def test_security_groups_trigger_handler(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_handler')
+ self.conductor_manager.security_group_api.trigger_handler('event',
+ self.context,
+ 'args')
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_handler(self.context,
+ 'event', ['args'])
+
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
@@ -551,6 +673,7 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor_manager = self.conductor_service.manager
self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
@@ -640,6 +763,16 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
dict(topic='compute', host='host'),
db_result_listified=True)
+ def test_security_groups_trigger_handler(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_handler')
+ self.conductor_manager.security_group_api.trigger_handler('event',
+ self.context,
+ 'arg')
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_handler(self.context,
+ 'event', ['arg'])
+
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
@@ -648,6 +781,7 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
+ self.conductor_manager = self.conductor_service.manager
self.db = None
def _do_update(self, instance_uuid, **updates):
@@ -789,12 +923,23 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
self.assertEqual(timeouts.count(10), 10)
self.assertTrue(None in timeouts)
+ def test_security_groups_trigger_handler(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_handler')
+ self.conductor_manager.security_group_api.trigger_handler('event',
+ self.context,
+ 'arg')
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_handler(self.context,
+ 'event', 'arg')
+
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
+ self.conductor_manager = self.conductor._manager._target
self.db = db
self.stub_out_client_exceptions()
diff --git a/nova/tests/conf_fixture.py b/nova/tests/conf_fixture.py
index 2f4d0ebb1..230f70a1b 100644
--- a/nova/tests/conf_fixture.py
+++ b/nova/tests/conf_fixture.py
@@ -30,8 +30,8 @@ CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
-CONF.import_opt('floating_ip_dns_manager', 'nova.network.manager')
-CONF.import_opt('instance_dns_manager', 'nova.network.manager')
+CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
+CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
diff --git a/nova/tests/fake_instance_actions.py b/nova/tests/fake_instance_actions.py
new file mode 100644
index 000000000..1667ac62d
--- /dev/null
+++ b/nova/tests/fake_instance_actions.py
@@ -0,0 +1,30 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+
+
+def fake_action_event_start(*args):
+ pass
+
+
+def fake_action_event_finish(*args):
+ pass
+
+
+def stub_out_action_events(stubs):
+ stubs.Set(db, 'action_event_start', fake_action_event_start)
+ stubs.Set(db, 'action_event_finish', fake_action_event_finish)
diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py
index b3d842468..285a4b7e3 100644
--- a/nova/tests/fake_libvirt_utils.py
+++ b/nova/tests/fake_libvirt_utils.py
@@ -144,3 +144,7 @@ def fetch_image(context, target, image_id, user_id, project_id):
def get_instance_path(instance):
# TODO(mikal): we should really just call the real one here
return os.path.join(CONF.instances_path, instance['name'])
+
+
+def pick_disk_driver_name(is_block_dev=False):
+ return "qemu"
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index b97999e7d..883466cd6 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -47,7 +47,7 @@ class FakeIptablesFirewallDriver(object):
class FakeVIFDriver(object):
- def __init__(self, **kwargs):
+ def __init__(self, *args, **kwargs):
pass
def setattr(self, key, val):
@@ -65,6 +65,9 @@ class FakeVIFDriver(object):
def plug(self, instance, vif):
pass
+ def unplug(self, instance, vif):
+ pass
+
class FakeModel(dict):
"""Represent a model from the db."""
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index dbf620196..92ce0815a 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -117,15 +117,21 @@ policy_data = """
"compute_extension:createserverext": "",
"compute_extension:deferred_delete": "",
"compute_extension:disk_config": "",
+ "compute_extension:evacuate": "",
"compute_extension:extended_server_attributes": "",
"compute_extension:extended_status": "",
+ "compute_extension:extended_availability_zone": "",
"compute_extension:fixed_ips": "",
"compute_extension:flavor_access": "",
"compute_extension:flavor_disabled": "",
"compute_extension:flavor_rxtx": "",
"compute_extension:flavor_swap": "",
"compute_extension:flavorextradata": "",
- "compute_extension:flavorextraspecs": "",
+ "compute_extension:flavorextraspecs:index": "",
+ "compute_extension:flavorextraspecs:show": "",
+ "compute_extension:flavorextraspecs:create": "is_admin:True",
+ "compute_extension:flavorextraspecs:update": "is_admin:True",
+ "compute_extension:flavorextraspecs:delete": "is_admin:True",
"compute_extension:flavormanage": "",
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
@@ -157,6 +163,10 @@ policy_data = """
"compute_extension:virtual_interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
+ "compute_extension:volume_attachments:index": "",
+ "compute_extension:volume_attachments:show": "",
+ "compute_extension:volume_attachments:create": "",
+ "compute_extension:volume_attachments:delete": "",
"compute_extension:volumetypes": "",
"compute_extension:zones": "",
"compute_extension:availability_zone:list": "",
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index f2aa3ea91..c7430ee6d 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -136,7 +136,6 @@ class API(object):
def create_with_kwargs(self, context, **kwargs):
volume_id = kwargs.get('volume_id', None)
- print volume_id
v = fake_volume(kwargs['size'],
kwargs['name'],
kwargs['description'],
@@ -145,7 +144,6 @@ class API(object):
None,
None,
None)
- print v.vol['id']
if kwargs.get('status', None) is not None:
v.vol['status'] = kwargs['status']
if kwargs['host'] is not None:
diff --git a/nova/tests/fakeguestfs.py b/nova/tests/fakeguestfs.py
index 33ca49c33..ff006db68 100644
--- a/nova/tests/fakeguestfs.py
+++ b/nova/tests/fakeguestfs.py
@@ -50,7 +50,7 @@ class GuestFS(object):
self.mounts.append((options, device, mntpoint))
def mkdir_p(self, path):
- if not path in self.files:
+ if path not in self.files:
self.files[path] = {
"isdir": True,
"gid": 100,
@@ -59,7 +59,7 @@ class GuestFS(object):
}
def read_file(self, path):
- if not path in self.files:
+ if path not in self.files:
self.files[path] = {
"isdir": False,
"content": "Hello World",
@@ -71,7 +71,7 @@ class GuestFS(object):
return self.files[path]["content"]
def write(self, path, content):
- if not path in self.files:
+ if path not in self.files:
self.files[path] = {
"isdir": False,
"content": "Hello World",
@@ -83,7 +83,7 @@ class GuestFS(object):
self.files[path]["content"] = content
def write_append(self, path, content):
- if not path in self.files:
+ if path not in self.files:
self.files[path] = {
"isdir": False,
"content": "Hello World",
@@ -95,13 +95,13 @@ class GuestFS(object):
self.files[path]["content"] = self.files[path]["content"] + content
def stat(self, path):
- if not path in self.files:
+ if path not in self.files:
raise RuntimeError("No such file: " + path)
return self.files[path]["mode"]
def chown(self, uid, gid, path):
- if not path in self.files:
+ if path not in self.files:
raise RuntimeError("No such file: " + path)
if uid != -1:
@@ -110,7 +110,7 @@ class GuestFS(object):
self.files[path]["gid"] = gid
def chmod(self, mode, path):
- if not path in self.files:
+ if path not in self.files:
raise RuntimeError("No such file: " + path)
self.files[path]["mode"] = mode
diff --git a/nova/tests/fakelibvirt.py b/nova/tests/fakelibvirt.py
index 259d192cb..6abe7771c 100644
--- a/nova/tests/fakelibvirt.py
+++ b/nova/tests/fakelibvirt.py
@@ -481,7 +481,7 @@ class DomainSnapshot(object):
class Connection(object):
- def __init__(self, uri, readonly):
+ def __init__(self, uri, readonly, version=9007):
if not uri or uri == '':
if allow_default_uri_connection:
uri = 'qemu:///session'
@@ -506,6 +506,8 @@ class Connection(object):
self._running_vms = {}
self._id_counter = 1 # libvirt reserves 0 for the hypervisor.
self._nwfilters = {}
+ self.fakeLibVersion = version
+ self.fakeVersion = version
def _add_filter(self, nwfilter):
self._nwfilters[nwfilter._name] = nwfilter
@@ -576,10 +578,10 @@ class Connection(object):
return 'QEMU'
def getLibVersion(self):
- return 9007
+ return self.fakeLibVersion
def getVersion(self):
- return 14000
+ return self.fakeVersion
def getHostname(self):
return 'compute1'
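With the new constructor argument, tests can dial the fake hypervisor's reported version up or down to hit version-gated code paths; libvirt encodes versions as major * 1000000 + minor * 1000 + micro, so the default 9007 reads as 0.9.7. For example:

    # Simulate an older libvirt (0.9.6) to exercise a fallback path.
    conn = Connection('qemu:///session', False, version=9006)
    assert conn.getLibVersion() == 9006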
diff --git a/nova/tests/hyperv/README.rst b/nova/tests/hyperv/README.rst
deleted file mode 100644
index c7ba16046..000000000
--- a/nova/tests/hyperv/README.rst
+++ /dev/null
@@ -1,83 +0,0 @@
-=====================================
-OpenStack Hyper-V Nova Testing Architecture
-=====================================
-
-The Hyper-V Nova Compute plugin uses Windows Management Instrumentation (WMI)
-as the main API for hypervisor related operations.
-WMI has a database / procedural oriented nature that can become difficult to
-test with a traditional static mock / stub based unit testing approach.
-
-The included Hyper-V testing framework has been developed with the
-following goals:
-
-1) Dynamic mock generation.
-2) Decoupling. No dependencies on WMI or any other module.
- The tests are designed to work with mocked objects in all cases, including
- OS-dependent (e.g. wmi, os, subprocess) and non-deterministic
- (e.g. time, uuid) modules
-3) Transparency. Mocks and real objects can be swapped via DI
- or monkey patching.
-4) Platform independence.
-5) Tests need to be executed against the real object or against the mocks
- with a simple configuration switch. Development efforts can highly
- benefit from this feature.
-6) It must be possible to change a mock's behavior without running the tests
- against the hypervisor (e.g. by manually adding a value / return value).
-
-The tests included in this package include dynamically generated mock objects,
-based on the recording of the attribute values and invocations on the
-real WMI objects and other OS dependent features.
-The generated mock objects are serialized in the nova/tests/hyperv/stubs
-directory as gzipped pickled objects.
-
-An environment variable controls the execution mode of the tests.
-
-Recording mode:
-
-NOVA_GENERATE_TEST_MOCKS=True
-Tests are executed on the hypervisor (without mocks), and mock objects are
-generated.
-
-Replay mode:
-
-NOVA_GENERATE_TEST_MOCKS=
-Tests are executed with the existing mock objects (default).
-
-Mock generation is performed by nova.tests.hyperv.mockproxy.MockProxy.
-Instances of this class wrap objects that need to be mocked and act as a
-delegate on the wrapped object by leveraging Python's __getattr__ feature.
-Attribute values and method call return values are recorded at each access.
-Objects returned by attributes and method invocations are wrapped in a
-MockProxy consistently.
-From a caller perspective, the MockProxy is completely transparent,
-with the exception of calls to the type(...) builtin function.
-
-At the end of the test, a mock is generated by each MockProxy by calling
-the get_mock() method. A mock is represented by an instance of the
-nova.tests.hyperv.mockproxy.Mock class.
-
-The Mock class task consists of replicating the behaviour of the mocked
-objects / modules by returning the same values in the same order, for example:
-
-def check_path(path):
- if not os.path.exists(path):
- os.makedirs(path)
-
-check_path(path)
-# The second time os.path.exists returns True
-check_path(path)
-
-The injection of MockProxy / Mock instances is performed by the
-nova.tests.hyperv.basetestcase.BaseTestCase class in the setUp()
-method via selective monkey patching.
-Mocks are serialized in tearDown() during recording.
-
-The actual Hyper-V test case inherits from BaseTestCase:
-nova.tests.hyperv.test_hypervapi.HyperVAPITestCase
-
-
-Future directions:
-
-1) Replace the pickled files with a more generic serialization option (e.g. json)
-2) Add methods to statically extend the mocks (e.g. method call return values)
-3) Extend an existing framework, e.g. mox
diff --git a/nova/tests/hyperv/__init__.py b/nova/tests/hyperv/__init__.py
index e69de29bb..090fc0639 100644
--- a/nova/tests/hyperv/__init__.py
+++ b/nova/tests/hyperv/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/hyperv/basetestcase.py b/nova/tests/hyperv/basetestcase.py
deleted file mode 100644
index c4f6cf95f..000000000
--- a/nova/tests/hyperv/basetestcase.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-TestCase for MockProxy based tests and related classes.
-"""
-
-import gzip
-import os
-import pickle
-import sys
-
-from nova import test
-from nova.tests.hyperv import mockproxy
-
-gen_test_mocks_key = 'NOVA_GENERATE_TEST_MOCKS'
-
-
-class BaseTestCase(test.TestCase):
- """TestCase for MockProxy based tests."""
-
- def run(self, result=None):
- self._currentResult = result
- super(BaseTestCase, self).run(result)
-
- def setUp(self):
- super(BaseTestCase, self).setUp()
- self._mps = {}
-
- def tearDown(self):
- super(BaseTestCase, self).tearDown()
-
- # python-subunit will wrap test results with a decorator.
- # Need to access the decorated member of results to get the
- # actual test result when using python-subunit.
- if hasattr(self._currentResult, 'decorated'):
- result = self._currentResult.decorated
- else:
- result = self._currentResult
- has_errors = len([test for (test, msgs) in result.errors
- if test.id() == self.id()]) > 0
- failed = len([test for (test, msgs) in result.failures
- if test.id() == self.id()]) > 0
-
- if not has_errors and not failed:
- self._save_mock_proxies()
-
- def _save_mock(self, name, mock):
- path = self._get_stub_file_path(self.id(), name)
- pickle.dump(mock, gzip.open(path, 'wb'))
-
- def _get_stub_file_path(self, test_name, mock_name):
- # test naming differs between platforms
- prefix = 'nova.tests.'
- if test_name.startswith(prefix):
- test_name = test_name[len(prefix):]
- file_name = '{0}_{1}.p.gz'.format(test_name, mock_name)
- return os.path.join(os.path.dirname(mockproxy.__file__),
- "stubs", file_name)
-
- def _load_mock(self, name):
- path = self._get_stub_file_path(self.id(), name)
- if os.path.exists(path):
- return pickle.load(gzip.open(path, 'rb'))
- else:
- return None
-
- def _load_mock_or_create_proxy(self, module_name):
- m = None
- if not gen_test_mocks_key in os.environ or \
- os.environ[gen_test_mocks_key].lower() \
- not in ['true', 'yes', '1']:
- m = self._load_mock(module_name)
- else:
- __import__(module_name)
- module = sys.modules[module_name]
- m = mockproxy.MockProxy(module)
- self._mps[module_name] = m
- return m
-
- def _inject_mocks_in_modules(self, objects_to_mock, modules_to_test):
- for module_name in objects_to_mock:
- mp = self._load_mock_or_create_proxy(module_name)
- for mt in modules_to_test:
- module_local_name = module_name.split('.')[-1]
- setattr(mt, module_local_name, mp)
-
- def _save_mock_proxies(self):
- for name, mp in self._mps.items():
- m = mp.get_mock()
- if m.has_values():
- self._save_mock(name, m)
diff --git a/nova/tests/hyperv/db_fakes.py b/nova/tests/hyperv/db_fakes.py
index 16d894df8..e384e909a 100644
--- a/nova/tests/hyperv/db_fakes.py
+++ b/nova/tests/hyperv/db_fakes.py
@@ -29,35 +29,35 @@ from nova import utils
def get_fake_instance_data(name, project_id, user_id):
return {'name': name,
- 'id': 1,
- 'uuid': str(uuid.uuid4()),
- 'project_id': project_id,
- 'user_id': user_id,
- 'image_ref': "1",
- 'kernel_id': "1",
- 'ramdisk_id': "1",
- 'mac_address': "de:ad:be:ef:be:ef",
- 'instance_type':
- {'name': 'm1.tiny',
- 'memory_mb': 512,
- 'vcpus': 1,
- 'root_gb': 0,
- 'flavorid': 1,
- 'rxtx_factor': 1}
- }
+ 'id': 1,
+ 'uuid': str(uuid.uuid4()),
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'image_ref': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'instance_type':
+ {'name': 'm1.tiny',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'root_gb': 0,
+ 'flavorid': 1,
+ 'rxtx_factor': 1}
+ }
def get_fake_image_data(project_id, user_id):
return {'name': 'image1',
- 'id': 1,
- 'project_id': project_id,
- 'user_id': user_id,
- 'image_ref': "1",
- 'kernel_id': "1",
- 'ramdisk_id': "1",
- 'mac_address': "de:ad:be:ef:be:ef",
- 'instance_type': 'm1.tiny',
- }
+ 'id': 1,
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'image_ref': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'mac_address': "de:ad:be:ef:be:ef",
+ 'instance_type': 'm1.tiny',
+ }
def get_fake_volume_info_data(target_portal, volume_id):
@@ -72,25 +72,25 @@ def get_fake_volume_info_data(target_portal, volume_id):
'auth_method': 'fake',
}
-}
+ }
def get_fake_block_device_info(target_portal, volume_id):
- return {
- 'block_device_mapping': [{'connection_info': {
- 'driver_volume_type': 'iscsi',
- 'data': {'target_lun': 1,
- 'volume_id': volume_id,
- 'target_iqn': 'iqn.2010-10.org.openstack:volume-' +
- volume_id,
- 'target_portal': target_portal,
- 'target_discovered': False}},
- 'mount_device': 'vda',
- 'delete_on_termination': False}],
+ return {'block_device_mapping': [{'connection_info': {
+ 'driver_volume_type': 'iscsi',
+ 'data': {'target_lun': 1,
+ 'volume_id': volume_id,
+ 'target_iqn':
+ 'iqn.2010-10.org.openstack:volume-' +
+ volume_id,
+ 'target_portal': target_portal,
+ 'target_discovered': False}},
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}],
'root_device_name': None,
'ephemerals': [],
'swap': None
- }
+ }
def stub_out_db_instance_api(stubs):
@@ -99,11 +99,9 @@ def stub_out_db_instance_api(stubs):
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
- 'm1.medium':
- dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
+ 'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
- 'm1.xlarge':
- dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
+ 'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
@@ -152,7 +150,7 @@ def stub_out_db_instance_api(stubs):
'vcpus': instance_type['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'root_gb': instance_type['root_gb'],
- }
+ }
return FakeModel(base_options)
def fake_network_get_by_instance(context, instance_id):
@@ -181,4 +179,4 @@ def stub_out_db_instance_api(stubs):
stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
+ fake_block_device_mapping_get_all_by_instance)
diff --git a/nova/tests/hyperv/fake.py b/nova/tests/hyperv/fake.py
new file mode 100644
index 000000000..9890a5462
--- /dev/null
+++ b/nova/tests/hyperv/fake.py
@@ -0,0 +1,46 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import io
+import os
+
+
+class PathUtils(object):
+ def open(self, path, mode):
+ return io.BytesIO(b'fake content')
+
+ def get_instances_path(self):
+ return 'C:\\FakePath\\'
+
+ def get_instance_path(self, instance_name):
+ return os.path.join(self.get_instances_path(), instance_name)
+
+ def get_vhd_path(self, instance_name):
+ instance_path = self.get_instance_path(instance_name)
+ return os.path.join(instance_path, instance_name + ".vhd")
+
+ def get_base_vhd_path(self, image_name):
+ base_dir = os.path.join(self.get_instances_path(), '_base')
+ return os.path.join(base_dir, image_name + ".vhd")
+
+ def make_export_path(self, instance_name):
+ export_folder = os.path.join(self.get_instances_path(), "export",
+ instance_name)
+ return export_folder
+
+ def vhd_exists(self, path):
+ return False
diff --git a/nova/tests/hyperv/hypervutils.py b/nova/tests/hyperv/hypervutils.py
deleted file mode 100644
index b71e60229..000000000
--- a/nova/tests/hyperv/hypervutils.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Hyper-V classes to be used in testing.
-"""
-
-import sys
-import time
-
-from nova import exception
-from nova.virt.hyperv import constants
-from nova.virt.hyperv import volumeutilsV2
-from xml.etree import ElementTree
-
-# Check needed for unit testing on Unix
-if sys.platform == 'win32':
- import wmi
-
-
-class HyperVUtils(object):
- def __init__(self):
- self.__conn = None
- self.__conn_v2 = None
- self.__conn_cimv2 = None
- self.__conn_wmi = None
- self.__conn_storage = None
- self._volumeutils = volumeutilsV2.VolumeUtilsV2(
- self._conn_storage, self._conn_wmi)
-
- @property
- def _conn(self):
- if self.__conn is None:
- self.__conn = wmi.WMI(moniker='//./root/virtualization')
- return self.__conn
-
- @property
- def _conn_v2(self):
- if self.__conn_v2 is None:
- self.__conn_v2 = wmi.WMI(moniker='//./root/virtualization/v2')
- return self.__conn_v2
-
- @property
- def _conn_cimv2(self):
- if self.__conn_cimv2 is None:
- self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
- return self.__conn_cimv2
-
- @property
- def _conn_wmi(self):
- if self.__conn_wmi is None:
- self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
- return self.__conn_wmi
-
- @property
- def _conn_storage(self):
- if self.__conn_storage is None:
- storage_namespace = '//./Root/Microsoft/Windows/Storage'
- self.__conn_storage = wmi.WMI(moniker=storage_namespace)
- return self.__conn_storage
-
- def create_vhd(self, path):
- image_service = self._conn.query(
- "Select * from Msvm_ImageManagementService")[0]
- (job, ret_val) = image_service.CreateDynamicVirtualHardDisk(
- Path=path, MaxInternalSize=3 * 1024 * 1024)
-
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._check_job_status(job)
- else:
- success = (ret_val == 0)
- if not success:
- raise Exception('Failed to create Dynamic disk %s with error %d'
- % (path, ret_val))
-
- def _check_job_status(self, jobpath):
- """Poll WMI job state for completion."""
- job_wmi_path = jobpath.replace('\\', '/')
- job = wmi.WMI(moniker=job_wmi_path)
-
- while job.JobState == constants.WMI_JOB_STATE_RUNNING:
- time.sleep(0.1)
- job = wmi.WMI(moniker=job_wmi_path)
- return job.JobState == constants.WMI_JOB_STATE_COMPLETED
-
- def _get_vm(self, vm_name, conn=None):
- if conn is None:
- conn = self._conn
- vml = conn.Msvm_ComputerSystem(ElementName=vm_name)
- if not len(vml):
- raise exception.InstanceNotFound(instance=vm_name)
- return vml[0]
-
- def remote_vm_exists(self, server, vm_name):
- conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
- return self._vm_exists(conn, vm_name)
-
- def vm_exists(self, vm_name):
- return self._vm_exists(self._conn, vm_name)
-
- def _vm_exists(self, conn, vm_name):
- return len(conn.Msvm_ComputerSystem(ElementName=vm_name)) > 0
-
- def _get_vm_summary(self, vm_name):
- vm = self._get_vm(vm_name)
- vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
- vmsettings = vm.associators(
- wmi_association_class='Msvm_SettingsDefineState',
- wmi_result_class='Msvm_VirtualSystemSettingData')
- settings_paths = [v.path_() for v in vmsettings]
- return vs_man_svc.GetSummaryInformation([100, 105],
- settings_paths)[1][0]
-
- def get_vm_uptime(self, vm_name):
- return self._get_vm_summary(vm_name).UpTime
-
- def get_vm_state(self, vm_name):
- return self._get_vm_summary(vm_name).EnabledState
-
- def set_vm_state(self, vm_name, req_state):
- self._set_vm_state(self._conn, vm_name, req_state)
-
- def _set_vm_state(self, conn, vm_name, req_state):
- vm = self._get_vm(vm_name, conn)
- (job, ret_val) = vm.RequestStateChange(req_state)
-
- success = False
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._check_job_status(job)
- elif ret_val == 0:
- success = True
- elif ret_val == 32775:
- #Invalid state for current operation. Typically means it is
- #already in the state requested
- success = True
- if not success:
- raise Exception(_("Failed to change vm state of %(vm_name)s"
- " to %(req_state)s") % locals())
-
- def get_vm_disks(self, vm_name):
- return self._get_vm_disks(self._conn, vm_name)
-
- def _get_vm_disks(self, conn, vm_name):
- vm = self._get_vm(vm_name, conn)
- vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
- rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
-
- disks = [r for r in rasds
- if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
- disk_files = []
- for disk in disks:
- disk_files.extend([c for c in disk.Connection])
-
- volumes = [r for r in rasds
- if r.ResourceSubType == 'Microsoft Physical Disk Drive']
- volume_drives = []
- for volume in volumes:
- hostResources = volume.HostResource
- drive_path = hostResources[0]
- volume_drives.append(drive_path)
-
- dvds = [r for r in rasds
- if r.ResourceSubType == 'Microsoft Virtual CD/DVD Disk']
- dvd_files = []
- for dvd in dvds:
- dvd_files.extend([c for c in dvd.Connection])
-
- return (disk_files, volume_drives, dvd_files)
-
- def remove_remote_vm(self, server, vm_name):
- conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
- conn_cimv2 = wmi.WMI(moniker='//' + server + '/root/cimv2')
- self._remove_vm(vm_name, conn, conn_cimv2)
-
- def remove_vm(self, vm_name):
- self._remove_vm(vm_name, self._conn, self._conn_cimv2)
-
- def _remove_vm(self, vm_name, conn, conn_cimv2):
- vm = self._get_vm(vm_name, conn)
- vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
- #Stop the VM first.
- self._set_vm_state(conn, vm_name, 3)
-
- (disk_files, volume_drives, dvd_files) = self._get_vm_disks(conn,
- vm_name)
-
- (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._check_job_status(job)
- elif ret_val == 0:
- success = True
- if not success:
- raise Exception(_('Failed to destroy vm %s') % vm_name)
-
- #Delete associated vhd disk files.
- for disk in disk_files + dvd_files:
- vhd_file = conn_cimv2.query(
- "Select * from CIM_DataFile where Name = '" +
- disk.replace("'", "''") + "'")[0]
- vhd_file.Delete()
-
- def _get_target_iqn(self, volume_id):
- return 'iqn.2010-10.org.openstack:volume-' + volume_id
-
- def logout_iscsi_volume_sessions(self, volume_id):
- target_iqn = self._get_target_iqn(volume_id)
- if (self.iscsi_volume_sessions_exist(volume_id)):
- self._volumeutils.logout_storage_target(target_iqn)
-
- def iscsi_volume_sessions_exist(self, volume_id):
- target_iqn = self._get_target_iqn(volume_id)
- return len(self._conn_wmi.query(
- "SELECT * FROM MSiSCSIInitiator_SessionClass \
- WHERE TargetName='" + target_iqn + "'")) > 0
-
- def get_vm_count(self):
- return len(self._conn.query(
- "Select * from Msvm_ComputerSystem where Description "
- "<> 'Microsoft Hosting Computer System'"))
-
- def get_vm_snapshots_count(self, vm_name):
- return len(self._conn.query(
- "Select * from Msvm_VirtualSystemSettingData where \
- SettingType = 5 and SystemName = '" + vm_name + "'"))
-
- def get_vhd_parent_path(self, vhd_path):
-
- image_man_svc = self._conn.Msvm_ImageManagementService()[0]
-
- (vhd_info, job_path, ret_val) = \
- image_man_svc.GetVirtualHardDiskInfo(vhd_path)
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._check_job_status(job_path)
- else:
- success = (ret_val == 0)
- if not success:
- raise Exception(_("Failed to get info for disk %s") %
- (vhd_path))
-
- base_disk_path = None
- et = ElementTree.fromstring(vhd_info)
- for item in et.findall("PROPERTY"):
- if item.attrib["NAME"] == "ParentPath":
- base_disk_path = item.find("VALUE").text
- break
-
- return base_disk_path
diff --git a/nova/tests/hyperv/mockproxy.py b/nova/tests/hyperv/mockproxy.py
deleted file mode 100644
index 513422c13..000000000
--- a/nova/tests/hyperv/mockproxy.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Classes for dynamic generation of mock objects.
-"""
-
-import inspect
-
-
-def serialize_obj(obj):
- if isinstance(obj, float):
- val = str(round(obj, 10))
- elif isinstance(obj, dict):
- d = {}
- for k1, v1 in obj.items():
- d[k1] = serialize_obj(v1)
- val = str(d)
- elif isinstance(obj, list):
- l1 = []
- for i1 in obj:
- l1.append(serialize_obj(i1))
- val = str(l1)
- elif isinstance(obj, tuple):
- l1 = ()
- for i1 in obj:
- l1 = l1 + (serialize_obj(i1),)
- val = str(l1)
- else:
- if isinstance(obj, str) or isinstance(obj, unicode):
- val = obj
- elif hasattr(obj, '__str__') and inspect.ismethod(obj.__str__):
- val = str(obj)
- else:
- val = str(type(obj))
- return val
-
-
-def serialize_args(*args, **kwargs):
- """Workaround for float string conversion issues in Python 2.6."""
- return serialize_obj((args, kwargs))
-
-
-class MockException(Exception):
- def __init__(self, message):
- super(MockException, self).__init__(message)
-
-
-class Mock(object):
- def _get_next_value(self, name):
- c = self._access_count.get(name)
- if c is None:
- c = 0
- else:
- c = c + 1
- self._access_count[name] = c
-
- try:
- value = self._values[name][c]
- except IndexError as ex:
- raise MockException(_('Couldn\'t find invocation num. %(c)d '
- 'of attribute "%(name)s"') % locals())
- return value
-
- def _get_next_ret_value(self, name, params):
- d = self._access_count.get(name)
- if d is None:
- d = {}
- self._access_count[name] = d
- c = d.get(params)
- if c is None:
- c = 0
- else:
- c = c + 1
- d[params] = c
-
- try:
- m = self._values[name]
- except KeyError as ex:
- raise MockException(_('Couldn\'t find attribute "%s"') % (name))
-
- try:
- value = m[params][c]
- except KeyError as ex:
- raise MockException(_('Couldn\'t find attribute "%(name)s" '
- 'with arguments "%(params)s"') % locals())
- except IndexError as ex:
- raise MockException(_('Couldn\'t find invocation num. %(c)d '
- 'of attribute "%(name)s" with arguments "%(params)s"')
- % locals())
-
- return value
-
- def __init__(self, values):
- self._values = values
- self._access_count = {}
-
- def has_values(self):
- return len(self._values) > 0
-
- def __getattr__(self, name):
- if name.startswith('__') and name.endswith('__'):
- return object.__getattribute__(self, name)
- else:
- try:
- isdict = isinstance(self._values[name], dict)
- except KeyError as ex:
- raise MockException(_('Couldn\'t find attribute "%s"')
- % (name))
-
- if isdict:
- def newfunc(*args, **kwargs):
- params = serialize_args(args, kwargs)
- return self._get_next_ret_value(name, params)
- return newfunc
- else:
- return self._get_next_value(name)
-
- def __str__(self):
- return self._get_next_value('__str__')
-
- def __iter__(self):
- return getattr(self._get_next_value('__iter__'), '__iter__')()
-
- def __len__(self):
- return self._get_next_value('__len__')
-
- def __getitem__(self, key):
- return self._get_next_ret_value('__getitem__', str(key))
-
- def __call__(self, *args, **kwargs):
- params = serialize_args(args, kwargs)
- return self._get_next_ret_value('__call__', params)
-
-
-class MockProxy(object):
- def __init__(self, wrapped):
- self._wrapped = wrapped
- self._recorded_values = {}
-
- def _get_proxy_object(self, obj):
- if hasattr(obj, '__dict__') or isinstance(obj, tuple) or \
- isinstance(obj, list) or isinstance(obj, dict):
- p = MockProxy(obj)
- else:
- p = obj
- return p
-
- def __getattr__(self, name):
- if name in ['_wrapped']:
- return object.__getattribute__(self, name)
- else:
- attr = getattr(self._wrapped, name)
- if inspect.isfunction(attr) or inspect.ismethod(attr) or \
- inspect.isbuiltin(attr):
- def newfunc(*args, **kwargs):
- result = attr(*args, **kwargs)
- p = self._get_proxy_object(result)
- params = serialize_args(args, kwargs)
- self._add_recorded_ret_value(name, params, p)
- return p
- return newfunc
- elif hasattr(attr, '__dict__') or (hasattr(attr, '__getitem__')
- and not (isinstance(attr, str) or isinstance(attr, unicode))):
- p = MockProxy(attr)
- else:
- p = attr
- self._add_recorded_value(name, p)
- return p
-
- def __setattr__(self, name, value):
- if name in ['_wrapped', '_recorded_values']:
- object.__setattr__(self, name, value)
- else:
- setattr(self._wrapped, name, value)
-
- def _add_recorded_ret_value(self, name, params, val):
- d = self._recorded_values.get(name)
- if d is None:
- d = {}
- self._recorded_values[name] = d
- l = d.get(params)
- if l is None:
- l = []
- d[params] = l
- l.append(val)
-
- def _add_recorded_value(self, name, val):
- if not name in self._recorded_values:
- self._recorded_values[name] = []
- self._recorded_values[name].append(val)
-
- def get_mock(self):
- values = {}
- for k, v in self._recorded_values.items():
- if isinstance(v, dict):
- d = {}
- values[k] = d
- for k1, v1 in v.items():
- l = []
- d[k1] = l
- for i1 in v1:
- if isinstance(i1, MockProxy):
- l.append(i1.get_mock())
- else:
- l.append(i1)
- else:
- l = []
- values[k] = l
- for i in v:
- if isinstance(i, MockProxy):
- l.append(i.get_mock())
- elif isinstance(i, dict):
- d = {}
- for k1, v1 in v.items():
- if isinstance(v1, MockProxy):
- d[k1] = v1.get_mock()
- else:
- d[k1] = v1
- l.append(d)
- elif isinstance(i, list):
- l1 = []
- for i1 in i:
- if isinstance(i1, MockProxy):
- l1.append(i1.get_mock())
- else:
- l1.append(i1)
- l.append(l1)
- else:
- l.append(i)
- return Mock(values)
-
- def __str__(self):
- s = str(self._wrapped)
- self._add_recorded_value('__str__', s)
- return s
-
- def __len__(self):
- l = len(self._wrapped)
- self._add_recorded_value('__len__', l)
- return l
-
- def __iter__(self):
- it = []
- for i in self._wrapped:
- it.append(self._get_proxy_object(i))
- self._add_recorded_value('__iter__', it)
- return iter(it)
-
- def __getitem__(self, key):
- p = self._get_proxy_object(self._wrapped[key])
- self._add_recorded_ret_value('__getitem__', str(key), p)
- return p
-
- def __call__(self, *args, **kwargs):
- c = self._wrapped(*args, **kwargs)
- p = self._get_proxy_object(c)
- params = serialize_args(args, kwargs)
- self._add_recorded_ret_value('__call__', params, p)
- return p
diff --git a/nova/tests/hyperv/stubs/README.rst b/nova/tests/hyperv/stubs/README.rst
deleted file mode 100644
index 150fd3ad1..000000000
--- a/nova/tests/hyperv/stubs/README.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-Files with the extension .p.gz are compressed pickle files containing
-serialized mocks used during unit testing.
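For reference, such a stub can be read back the same way the removed
BaseTestCase._load_mock() did (a sketch; the file name is illustrative):

import gzip
import pickle

mock = pickle.load(gzip.open('test_x_os.p.gz', 'rb'))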
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
deleted file mode 100644
index c65832c57..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
deleted file mode 100644
index 7076c4868..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
deleted file mode 100644
index c251f9d6c..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
deleted file mode 100644
index cac08e3d0..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
deleted file mode 100644
index d6e624bb0..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
deleted file mode 100644
index bb18f7453..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
deleted file mode 100644
index a5f592a74..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
deleted file mode 100644
index 4bebe0e72..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
deleted file mode 100644
index 29a610f36..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
deleted file mode 100644
index ca92ece00..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
deleted file mode 100644
index 58269455d..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
deleted file mode 100644
index 97cd7e62b..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
deleted file mode 100644
index 708197430..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
deleted file mode 100644
index d5eb4d746..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
deleted file mode 100644
index d8c63d8ad..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
deleted file mode 100644
index d0b27d201..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
deleted file mode 100644
index 657379cec..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
deleted file mode 100644
index 8bf58ef5c..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
deleted file mode 100644
index c20281811..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
deleted file mode 100644
index a198af844..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
deleted file mode 100644
index 749eabe40..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
deleted file mode 100644
index c40e6f995..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
deleted file mode 100644
index c67dc9271..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
deleted file mode 100644
index 0d671fc18..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
deleted file mode 100644
index 66583beb1..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
deleted file mode 100644
index efdef819f..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz b/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
deleted file mode 100644
index 5edd6f147..000000000
--- a/nova/tests/hyperv/stubs/nova.tests.test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz
deleted file mode 100644
index f968e2af5..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz
deleted file mode 100644
index bd5ced9f8..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
deleted file mode 100644
index a48a21ca9..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
deleted file mode 100644
index c662b602a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
deleted file mode 100644
index 6a692b3d8..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_subprocess.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
deleted file mode 100644
index f2ae56be1..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
deleted file mode 100644
index 2d24523aa..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz
deleted file mode 100644
index aca0d6f0c..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz
deleted file mode 100644
index bbeec53df..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
deleted file mode 100644
index 3bf9bd13a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
deleted file mode 100644
index 62e3fa329..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
deleted file mode 100644
index 36970348a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_subprocess.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
deleted file mode 100644
index 8db997abf..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
deleted file mode 100644
index 73f90ac2b..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
deleted file mode 100644
index 3ae9a6f46..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_with_target_connection_failure_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
deleted file mode 100644
index 5b851f9b7..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_attach_volume_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz
deleted file mode 100644
index 7a1c47449..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz
deleted file mode 100644
index 48583265e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
deleted file mode 100644
index 90d6a2ca6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
deleted file mode 100644
index 3b17cc74f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_subprocess.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
deleted file mode 100644
index 162f52457..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
deleted file mode 100644
index f88f8bc86..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
deleted file mode 100644
index f671dc247..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_boot_from_volume_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz
deleted file mode 100644
index 37892d051..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz
deleted file mode 100644
index 9aec45796..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
deleted file mode 100644
index ffc21536e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
deleted file mode 100644
index b47c49202..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
deleted file mode 100644
index 78e4292b6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
deleted file mode 100644
index 5bc7602a8..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
deleted file mode 100644
index 9ba025e55..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_destroy_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz
deleted file mode 100644
index 3341bca28..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz
deleted file mode 100644
index 56cb9d103..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
deleted file mode 100644
index 81205e04d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
deleted file mode 100644
index 9d1311341..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
deleted file mode 100644
index a151a99b4..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_subprocess.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
deleted file mode 100644
index b1d0b0f3a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
deleted file mode 100644
index c2985c424..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
deleted file mode 100644
index 2c4901c9f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_detach_volume_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_ctypes.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_ctypes.p.gz
deleted file mode 100644
index 2481a7b3e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_ctypes.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz
deleted file mode 100644
index 61cbc1854..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_multiprocessing.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz
deleted file mode 100644
index 09b86b24e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz
deleted file mode 100644
index ba89bfd7e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz
deleted file mode 100644
index cfce8c10a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_available_resource_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz
deleted file mode 100644
index 6092f36ab..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz
deleted file mode 100644
index 010c07e56..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz
deleted file mode 100644
index 9d3adec48..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_host_stats_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz
deleted file mode 100644
index 995dde1b5..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz
deleted file mode 100644
index 12d18d12e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
deleted file mode 100644
index 64c756ffa..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
deleted file mode 100644
index d2cefdc37..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
deleted file mode 100644
index 9fdef3b90..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
deleted file mode 100644
index c34d2308b..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
deleted file mode 100644
index 36a342e7c..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_get_info_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_shutil.p.gz
deleted file mode 100644
index 3ab35a29f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_wmi.p.gz
deleted file mode 100644
index 411c0ed07..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_detail_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
deleted file mode 100644
index 1af20acde..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
deleted file mode 100644
index d84122d77..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_list_instances_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz
deleted file mode 100644
index d650f40a5..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz
deleted file mode 100644
index a03d442a4..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
deleted file mode 100644
index 993d9bb2d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
deleted file mode 100644
index 6693c2ce9..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
deleted file mode 100644
index 07898dd55..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
deleted file mode 100644
index 56e583449..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz
deleted file mode 100644
index 5d4c0e111..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz
deleted file mode 100644
index cb52cb974..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
deleted file mode 100644
index 8b2ff15f3..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
deleted file mode 100644
index aee1fb14d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
deleted file mode 100644
index f926d206f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
deleted file mode 100644
index 483b23d53..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
deleted file mode 100644
index 14d61039f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_with_target_failure_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
deleted file mode 100644
index daecf0156..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_live_migration_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz
deleted file mode 100644
index 548b88148..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz
deleted file mode 100644
index 8545a1833..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
deleted file mode 100644
index c1daf3db9..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
deleted file mode 100644
index 750d68d29..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
deleted file mode 100644
index 6e91b72a2..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
deleted file mode 100644
index 2d0349d96..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
deleted file mode 100644
index 6b9ef360a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_already_paused_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz
deleted file mode 100644
index 3e582226f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz
deleted file mode 100644
index 723966011..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
deleted file mode 100644
index 29b73888b..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
deleted file mode 100644
index 595124af2..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
deleted file mode 100644
index 03d53be74..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
deleted file mode 100644
index 2a0663e6f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
deleted file mode 100644
index e651c02fc..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pause_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz
deleted file mode 100644
index a50935649..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz
deleted file mode 100644
index 4b07271c1..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
deleted file mode 100644
index f62298ed7..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
deleted file mode 100644
index 12a164f23..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
deleted file mode 100644
index 33f1862e6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
deleted file mode 100644
index 80853eea4..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
deleted file mode 100644
index 5cebe527d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_already_powered_off_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz
deleted file mode 100644
index d0c431b9d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz
deleted file mode 100644
index d231f803d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
deleted file mode 100644
index 25fe5f3ff..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
deleted file mode 100644
index 8be80ba56..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
deleted file mode 100644
index 51b6f2df8..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
deleted file mode 100644
index 97812405e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
deleted file mode 100644
index 20b2e021e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_off_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz
deleted file mode 100644
index c32f9ecd2..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz
deleted file mode 100644
index 672376a0e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
deleted file mode 100644
index aa6f4ca8a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
deleted file mode 100644
index 00f5770a7..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
deleted file mode 100644
index 1631f35df..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
deleted file mode 100644
index ec28756ad..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
deleted file mode 100644
index 699ccde76..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_already_running_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz
deleted file mode 100644
index 2b99fb9cd..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz
deleted file mode 100644
index a43bfeb7e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
deleted file mode 100644
index 57e74e618..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
deleted file mode 100644
index 273364d95..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
deleted file mode 100644
index 732a0f2e6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
deleted file mode 100644
index d6cb32559..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
deleted file mode 100644
index e44197039..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_power_on_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz
deleted file mode 100644
index 456af2816..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
deleted file mode 100644
index 93568dcef..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
deleted file mode 100644
index 6a4b90850..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
deleted file mode 100644
index fc816320f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
deleted file mode 100644
index 83cf9c071..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
deleted file mode 100644
index 93977743f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_cow_image_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz
deleted file mode 100644
index f58f80a79..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
deleted file mode 100644
index 18a8aed13..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
deleted file mode 100644
index 4225a72b0..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
deleted file mode 100644
index 363c431d4..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_pre_live_migration_no_cow_image_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz
deleted file mode 100644
index 8761703dc..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz
deleted file mode 100644
index fc907ed31..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
deleted file mode 100644
index 0eca8e6ce..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
deleted file mode 100644
index 0886c942d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
deleted file mode 100644
index d0fb77bd1..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
deleted file mode 100644
index df3961276..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
deleted file mode 100644
index 4df451154..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_reboot_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz
deleted file mode 100644
index 59724b43d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz
deleted file mode 100644
index 4b3711ec0..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
deleted file mode 100644
index 2f9a5de9c..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
deleted file mode 100644
index 8ffa516c0..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
deleted file mode 100644
index 6aade88c6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
deleted file mode 100644
index 276c06397..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
deleted file mode 100644
index 77a1650d4..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_already_running_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz
deleted file mode 100644
index ce19ed290..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz
deleted file mode 100644
index b2dadcd4d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
deleted file mode 100644
index aa378fedd..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
deleted file mode 100644
index 333a27b89..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
deleted file mode 100644
index 16ca553f6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
deleted file mode 100644
index 8cf3b564e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
deleted file mode 100644
index 0a2c8513b..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_resume_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz
deleted file mode 100644
index ae42d7734..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz
deleted file mode 100644
index 4fec34d08..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
deleted file mode 100644
index 74e8e95a6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
deleted file mode 100644
index da0528797..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
deleted file mode 100644
index 63f02bc75..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
deleted file mode 100644
index c014d5003..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz
deleted file mode 100644
index 592658541..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz
deleted file mode 100644
index 892f3c346..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
deleted file mode 100644
index 9996339f5..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
deleted file mode 100644
index 409ee5ef7..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
deleted file mode 100644
index 9e799c196..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
deleted file mode 100644
index 848024366..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
deleted file mode 100644
index 687952c4c..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_with_update_failure_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
deleted file mode 100644
index 57988a6b6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_snapshot_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz
deleted file mode 100644
index 303a47019..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz
deleted file mode 100644
index c211622e1..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz
deleted file mode 100644
index 5e5303cbc..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz
deleted file mode 100644
index 1bcbd48f3..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz
deleted file mode 100644
index ae557d73d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz
deleted file mode 100644
index 90ebff4e7..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz
deleted file mode 100644
index beccc2737..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_cdrom_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz
deleted file mode 100644
index af5082ab6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz
deleted file mode 100644
index 837d81b70..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz
deleted file mode 100644
index ecea62a01..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz
deleted file mode 100644
index 283cd7fdd..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz
deleted file mode 100644
index 44dcc89ae..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz
deleted file mode 100644
index 5c520c768..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz
deleted file mode 100644
index aec53305d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_config_drive_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz
deleted file mode 100644
index a16c88e54..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz
deleted file mode 100644
index d9c4e9c82..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
deleted file mode 100644
index 94aafb39a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
deleted file mode 100644
index e0ad00bf6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
deleted file mode 100644
index 00f7839ba..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
deleted file mode 100644
index 77422d3f5..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
deleted file mode 100644
index 414194a9d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_cow_image_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz
deleted file mode 100644
index b1e825822..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz
deleted file mode 100644
index 1e3d89fea..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz
deleted file mode 100644
index 627c78d7e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz
deleted file mode 100644
index e577cdb5e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz
deleted file mode 100644
index 72962fc52..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz
deleted file mode 100644
index 5d1351a14..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz
deleted file mode 100644
index eb0ed7241..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_config_drive_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz
deleted file mode 100644
index c65264688..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz
deleted file mode 100644
index ca40d6413..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
deleted file mode 100644
index 1d8081a3e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
deleted file mode 100644
index e03633b90..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
deleted file mode 100644
index 00c56dacc..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
deleted file mode 100644
index 7381c3cc6..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
deleted file mode 100644
index 115ed1dd5..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_cow_image_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
deleted file mode 100644
index df40b08c0..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz
deleted file mode 100644
index b51766f75..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
deleted file mode 100644
index 092a1f933..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
deleted file mode 100644
index 77f333c00..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
deleted file mode 100644
index 8ab166a60..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
deleted file mode 100644
index 97e96be17..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
deleted file mode 100644
index 728464ca9..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_spawn_no_vswitch_exception_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz
deleted file mode 100644
index 4aa6d171a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz
deleted file mode 100644
index df063a22e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
deleted file mode 100644
index b30363fcc..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
deleted file mode 100644
index 1681d9947..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
deleted file mode 100644
index 4469fd90e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
deleted file mode 100644
index f94f2ebb9..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
deleted file mode 100644
index 03afe2235..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_already_suspended_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz
deleted file mode 100644
index 2f95f62bf..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz
deleted file mode 100644
index 2e7ab44ad..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
deleted file mode 100644
index eb514d086..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
deleted file mode 100644
index 810c9e14d..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
deleted file mode 100644
index 2eb2a8372..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
deleted file mode 100644
index 67311757a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
deleted file mode 100644
index 0779125b3..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_suspend_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz
deleted file mode 100644
index 7e6cc708e..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz
deleted file mode 100644
index 0ce4bbf63..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
deleted file mode 100644
index 9068792c7..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
deleted file mode 100644
index 9b06cb884..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
deleted file mode 100644
index e91e6c965..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
deleted file mode 100644
index 271ded270..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
deleted file mode 100644
index 253bdfc82..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_already_running_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz
deleted file mode 100644
index 20486b189..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.utils.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz
deleted file mode 100644
index be92217ed..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_nova.virt.configdrive.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
deleted file mode 100644
index 36059e753..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_os.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
deleted file mode 100644
index aea394e9f..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_shutil.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
deleted file mode 100644
index 4850d3cda..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_time.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
deleted file mode 100644
index 99bf1806c..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_uuid.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz b/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
deleted file mode 100644
index 87b571e4a..000000000
--- a/nova/tests/hyperv/stubs/test_hypervapi.HyperVAPITestCase.test_unpause_wmi.p.gz
+++ /dev/null
Binary files differ
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 9dd9e5121..fb26fa4f1 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -339,7 +339,6 @@ class TestGlanceImageService(test.TestCase):
def test_update(self):
fixture = self._make_fixture(name='test image')
image = self.service.create(self.context, fixture)
- print image
image_id = image['id']
fixture['name'] = 'new image name'
self.service.update(self.context, image_id, fixture)
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
index a072b3128..958a5500b 100644
--- a/nova/tests/integrated/api/client.py
+++ b/nova/tests/integrated/api/client.py
@@ -155,7 +155,7 @@ class TestOpenStackClient(object):
LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals())

if check_response_status:
- if not http_status in check_response_status:
+ if http_status not in check_response_status:
if http_status == 404:
raise OpenStackApiNotFoundException(response=response)
elif http_status == 401:
diff --git a/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl b/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl
new file mode 100644
index 000000000..b9744ab2c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "addFixedIp":{
+ "networkId": %(networkId)s
+ }
+}
diff --git a/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl b/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl
new file mode 100644
index 000000000..ad1112912
--- /dev/null
+++ b/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl
@@ -0,0 +1,3 @@
+<addFixedIp>
+ <networkId>%(networkId)s</networkId>
+</addFixedIp>
diff --git a/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl b/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl
new file mode 100644
index 000000000..7367e1242
--- /dev/null
+++ b/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "removeFixedIp":{
+ "address": "%(ip)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl b/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl
new file mode 100644
index 000000000..10b722220
--- /dev/null
+++ b/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl
@@ -0,0 +1,3 @@
+<removeFixedIp>
+ <address>%(ip)s</address>
+</removeFixedIp>
diff --git a/nova/tests/integrated/api_samples/NMN/server-post-req.json.tpl b/nova/tests/integrated/api_samples/NMN/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/NMN/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/NMN/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/NMN/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/NMN/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/NMN/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/NMN/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/NMN/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/NMN/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/NMN/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/NMN/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
new file mode 100644
index 000000000..25915610d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
@@ -0,0 +1,56 @@
+{
+ "server": {
+ "updated": "%(timestamp)s",
+ "created": "%(timestamp)s",
+ "OS-EXT-AZ:availability_zone": null,
+ "OS-EXT-AZ:host_availability_zone": "nova",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
new file mode 100644
index 000000000..1cdbd2012
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(uuid)s" OS-EXT-AZ:availability_zone="None" OS-EXT-AZ:host_availability_zone="nova">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
new file mode 100644
index 000000000..895f0a514
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
@@ -0,0 +1,58 @@
+{
+ "servers": [
+ {
+ "updated": "%(timestamp)s",
+ "created": "%(timestamp)s",
+ "OS-EXT-AZ:availability_zone": null,
+ "OS-EXT-AZ:host_availability_zone": "nova",
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
new file mode 100644
index 000000000..15cd9b1e1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-AZ:availability_zone="None" OS-EXT-AZ:host_availability_zone="nova">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+ </server>
+</servers>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
index c70192949..c70192949 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
index beec3a12a..beec3a12a 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-get.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
index 1fb8e1a47..1fb8e1a47 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.json.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
index 1811882a2..1811882a2 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/extended-server-attrs-list.xml.tpl
+++ b/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
new file mode 100644
index 000000000..7ac35024b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-EXT-STS:power_state": 1,
+ "accessIPv4": "",
+ "accessIPv6": "",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(timestamp)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(host)s/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(host)s/openstack/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(timestamp)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
new file mode 100644
index 000000000..f594be120
--- /dev/null
+++ b/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" OS-EXT-STS:vm_state="active" OS-EXT-STS:task_state="None" OS-EXT-STS:power_state="1">
+ <image id="%(uuid)s">
+ <atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
+ </image>
+ <flavor id="1">
+ <atom:link href="%(host)s/openstack/flavors/1" rel="bookmark"/>
+ </flavor>
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <addresses>
+ <network id="private">
+ <ip version="4" addr="%(ip)s"/>
+ </network>
+ </addresses>
+ <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl
deleted file mode 100644
index 8b97dc28d..000000000
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.json.tpl
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "servers": [
- {
- "id": "%(id)s",
- "links": [
- {
- "href": "%(host)s/v2/openstack/servers/%(id)s",
- "rel": "self"
- },
- {
- "href": "%(host)s/openstack/servers/%(id)s",
- "rel": "bookmark"
- }
- ],
- "name": "new-server-test"
- }
- ]
-}
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl b/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl
deleted file mode 100644
index 03bee03a6..000000000
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-list-resp.xml.tpl
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server name="new-server-test" id="%(id)s">
- <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
- <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
- </server>
-</servers>
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index be2fabec4..35d50d025 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -17,6 +17,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "OS-EXT-AZ",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedAvailabilityZone",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "OS-EXT-SRV-ATTR",
"description": "%(text)s",
"links": [],
@@ -177,6 +185,14 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-evacuate",
+ "description": "%(text)s",
+ "links": [],
+ "name": "Evacuate",
+ "namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-fixed-ips",
"description": "Fixed IPs support.",
"links": [],
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index ae2e9ff9e..2adc5988c 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -6,6 +6,9 @@
<extension alias="OS-DCF" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" name="DiskConfig">
<description>%(text)s</description>
</extension>
+ <extension alias="OS-EXT-AZ" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" name="ExtendedAvailabilityZone">
+ <description>%(text)s</description>
+ </extension>
<extension alias="OS-EXT-SRV-ATTR" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" name="ExtendedServerAttributes">
<description>%(text)s</description>
</extension>
@@ -66,6 +69,9 @@
<extension alias="os-deferred-delete" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1" name="DeferredDelete">
<description>%(text)s</description>
</extension>
+ <extension alias="os-evacuate" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/evacuate/api/v2" name="Evacuate">
+ <description>%(text)s</description>
+ </extension>
<extension alias="os-fixed-ips" name="FixedIPs" namespace="http://docs.openstack.org/compute/ext/fixed_ips/api/v2" updated="2012-10-18T13:25:27-06:00">
<description>Fixed IPs support.</description>
</extension>
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
index 92e626293..85fc6f605 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
@@ -1,6 +1,8 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": null,
+ "OS-EXT-AZ:host_availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "%(compute_host)s",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
index 137e59686..bd73accda 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
@@ -1,5 +1,5 @@
<?xml version='1.0' encoding='UTF-8'?>
-<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-DCF:diskConfig="AUTO">
+<server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-EXT-AZ:availability_zone="None" OS-EXT-AZ:host_availability_zone="nova" OS-DCF:diskConfig="AUTO">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
index 8f1583baf..a4918203b 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
@@ -2,6 +2,8 @@
"servers": [
{
"OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": null,
+ "OS-EXT-AZ:host_availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "%(compute_host)s",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
index 344686e66..d26eb38ef 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
@@ -1,6 +1,6 @@
<?xml version='1.0' encoding='UTF-8'?>
-<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
- <server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-DCF:diskConfig="AUTO">
+<servers xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <server xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1" xmlns:OS-EXT-AZ="http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2" xmlns:OS-EXT-SRV-ATTR="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:OS-EXT-STS="http://docs.openstack.org/compute/ext/extended_status/api/v1.1" xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" status="ACTIVE" updated="%(timestamp)s" hostId="%(hostid)s" name="new-server-test" created="%(timestamp)s" userId="fake" tenantId="openstack" accessIPv4="" accessIPv6="" progress="0" id="%(id)s" key_name="None" config_drive="" OS-EXT-SRV-ATTR:vm_state="active" OS-EXT-SRV-ATTR:task_state="None" OS-EXT-SRV-ATTR:power_state="1" OS-EXT-SRV-ATTR:instance_name="instance-00000001" OS-EXT-SRV-ATTR:host="%(compute_host)s" OS-EXT-SRV-ATTR:hypervisor_hostname="%(hypervisor_hostname)s" OS-EXT-AZ:availability_zone="None" OS-EXT-AZ:host_availability_zone="nova" OS-DCF:diskConfig="AUTO">
<image id="%(uuid)s">
<atom:link href="%(host)s/openstack/images/%(uuid)s" rel="bookmark"/>
</image>
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl b/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl
new file mode 100644
index 000000000..179cddce7
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "evacuate": {
+ "host": "%(host)s",
+ "adminPass": "%(adminPass)s",
+ "onSharedStorage": "%(onSharedStorage)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl b/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl
new file mode 100644
index 000000000..b0471f916
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<evacuate xmlns="http://docs.openstack.org/compute/api/v2"
+ host="%(host)s"
+ adminPass="%(adminPass)s"
+ onSharedStorage="%(onSharedStorage)s"/>
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl b/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl
new file mode 100644
index 000000000..0da07da5b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl b/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl
new file mode 100644
index 000000000..2a779af6d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl
@@ -0,0 +1 @@
+<adminPass>%(password)s</adminPass>
\ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-evacuate/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-evacuate/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-evacuate/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-evacuate/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
new file mode 100644
index 000000000..dd858e76c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl
new file mode 100644
index 000000000..c94595cad
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<extra_specs>
+ <key1>%(value1)s</key1>
+ <key2>%(value2)s</key2>
+</extra_specs>
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
new file mode 100644
index 000000000..dd858e76c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl
new file mode 100644
index 000000000..1008b5bb0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_specs>
+ <key2>%(value2)s</key2>
+ <key1>%(value1)s</key1>
+</extra_specs>
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
new file mode 100644
index 000000000..adfa77008
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl
new file mode 100644
index 000000000..e3de59a34
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_spec key="key1">%(value1)s</extra_spec>
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
new file mode 100644
index 000000000..dd858e76c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
@@ -0,0 +1,6 @@
+{
+ "extra_specs": {
+ "key1": "%(value1)s",
+ "key2": "%(value2)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl
new file mode 100644
index 000000000..1008b5bb0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_specs>
+ <key2>%(value2)s</key2>
+ <key1>%(value1)s</key1>
+</extra_specs>
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
new file mode 100644
index 000000000..adfa77008
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl
new file mode 100644
index 000000000..6421e5959
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+ <key1>%(value1)s</key1>
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
new file mode 100644
index 000000000..adfa77008
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "key1": "%(value1)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl
new file mode 100644
index 000000000..e3de59a34
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<extra_spec key="key1">%(value1)s</extra_spec>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
new file mode 100644
index 000000000..7dc33ddb1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "dns_entry" :
+ {
+ "ip": "%(ip)s",
+ "dns_type": "%(dns_type)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
new file mode 100644
index 000000000..bd62d3418
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry>
+ <ip>%(ip)s</ip>
+ <dns_type>%(dns_type)s</dns_type>
+</dns_entry>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
new file mode 100644
index 000000000..3ec0743ba
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": "%(dns_type)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
new file mode 100644
index 000000000..38a659b78
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entry ip="%(ip)s" domain="%(domain)s" type="%(dns_type)s" id="None" name="%(name)s"/>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
new file mode 100644
index 000000000..db73be14a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "domain_entry" :
+ {
+ "domain": "%(domain)s",
+ "scope": "%(scope)s",
+ "project": "%(project)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
new file mode 100644
index 000000000..40866a537
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry>
+ <domain>%(domain)s</domain>
+ <scope>%(scope)s</scope>
+ <project>%(project)s</project>
+</domain_entry>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
new file mode 100644
index 000000000..a14d395d2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
@@ -0,0 +1,8 @@
+{
+ "domain_entry": {
+ "availability_zone": null,
+ "domain": "%(domain)s",
+ "project": "%(project)s",
+ "scope": "%(scope)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
new file mode 100644
index 000000000..1759c403a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entry project="%(project)s" scope="%(scope)s" domain="%(domain)s" availability_zone="None"/>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
new file mode 100644
index 000000000..8edd0603f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "dns_entry": {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": null
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
new file mode 100644
index 000000000..a889ef6e2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<dns_entry ip="%(ip)s" domain="%(domain)s" type="None" id="None" name="%(name)s"/>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
new file mode 100644
index 000000000..831cda7b5
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
@@ -0,0 +1,11 @@
+{
+ "dns_entries": [
+ {
+ "domain": "%(domain)s",
+ "id": null,
+ "ip": "%(ip)s",
+ "name": "%(name)s",
+ "type": null
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
new file mode 100644
index 000000000..bf7788f94
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<dns_entries>
+ <dns_entry ip="%(ip)s" domain="%(domain)s" type="None" id="None" name="%(name)s"/>
+</dns_entries>
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
new file mode 100644
index 000000000..a6055cfec
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "domain_entries": [
+ {
+ "availability_zone": null,
+ "domain": "%(domain)s",
+ "project": "%(project)s",
+ "scope": "%(scope)s"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
new file mode 100644
index 000000000..e57c290cb
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<domain_entries>
+ <domain_entry project="%(project)s" scope="%(scope)s" domain="%(domain)s" availability_zone="None"/>
+</domain_entries>
diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl b/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl
new file mode 100644
index 000000000..f3b222c39
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl
@@ -0,0 +1,7 @@
+{
+ "server": {
+ "alive": false,
+ "id": "%(uuid)s",
+ "project_id": "openstack"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl b/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl
new file mode 100644
index 000000000..758519b60
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server>
+ <project_id>openstack</project_id>
+ <id>%(uuid)s</id>
+ <alive>False</alive>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl
new file mode 100644
index 000000000..b33e80668
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "servers": [
+ {
+ "alive": false,
+ "id": "%(uuid)s",
+ "project_id": "openstack"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl
new file mode 100644
index 000000000..290ad6ca6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers>
+ <server>
+ <project_id>openstack</project_id>
+ <id>%(uuid)s</id>
+ <alive>False</alive>
+ </server>
+</servers>
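The "alive" flag in these os-fping samples comes from shelling out to the fping utility (the tests below stub utils.execute for exactly this reason). A sketch of deriving the flag, assuming fping's usual "<ip> is alive" / "<ip> is unreachable" output; parse_fping is an illustrative name, not the extension's actual code:

def parse_fping(output):
    # Collect the addresses fping reported reachable.
    alive = set()
    for line in output.splitlines():
        if line.endswith('is alive'):
            alive.add(line.split()[0])
    return alive

assert parse_fping('10.0.0.1 is alive\n'
                   '10.0.0.2 is unreachable') == set(['10.0.0.1'])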
diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl
new file mode 100644
index 000000000..d3916d1aa
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
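The opaque "contents" value above is nothing more than the base64 of the injected file's bytes (the integration test added later in this patch builds it the same way with data.encode('base64')). A runnable sketch, with the Richard Bach quote shortened:

import base64
import json

banner = b'"A cloud does not know why it moves..." -Richard Bach\n'
entry = {'path': '/etc/banner.txt',
         'contents': base64.b64encode(banner).decode('ascii')}
print(json.dumps({'server': {'name': 'new-server-test',
                             'personality': [entry]}}, indent=4))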
diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl
new file mode 100644
index 000000000..f92614984
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+ </file>
+ </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl
new file mode 100644
index 000000000..d5f030c87
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl
new file mode 100644
index 000000000..3bb13e69b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+ <metadata/>
+ <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+ <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl
new file mode 100644
index 000000000..6974f360f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "instance_usage_audit_logs": {
+ "hosts_not_run": [
+ "%(hostid)s"
+ ],
+ "log": {},
+ "num_hosts": 1,
+ "num_hosts_done": 0,
+ "num_hosts_not_run": 1,
+ "num_hosts_running": 0,
+ "overall_status": "0 of 1 hosts done. 0 errors.",
+ "period_beginning": "%(timestamp)s",
+ "period_ending": "%(timestamp)s",
+ "total_errors": 0,
+ "total_instances": 0
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl
new file mode 100644
index 000000000..4eafa8b4a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instance_usage_audit_logs>
+ <total_errors>0</total_errors>
+ <total_instances>0</total_instances>
+ <log/>
+ <num_hosts_running>0</num_hosts_running>
+ <num_hosts_done>0</num_hosts_done>
+ <num_hosts_not_run>1</num_hosts_not_run>
+ <hosts_not_run>
+ <item>%(hostid)s</item>
+ </hosts_not_run>
+ <overall_status>0 of 1 hosts done. 0 errors.</overall_status>
+ <period_ending>%(timestamp)s</period_ending>
+ <period_beginning>%(timestamp)s</period_beginning>
+ <num_hosts>1</num_hosts>
+</instance_usage_audit_logs>
diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl
new file mode 100644
index 000000000..eda952304
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl
@@ -0,0 +1,17 @@
+{
+ "instance_usage_audit_log": {
+ "hosts_not_run": [
+ "%(hostid)s"
+ ],
+ "log": {},
+ "num_hosts": 1,
+ "num_hosts_done": 0,
+ "num_hosts_not_run": 1,
+ "num_hosts_running": 0,
+ "overall_status": "0 of 1 hosts done. 0 errors.",
+ "period_beginning": "%(timestamp)s",
+ "period_ending": "%(timestamp)s",
+ "total_errors": 0,
+ "total_instances": 0
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl
new file mode 100644
index 000000000..1ef243292
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl
@@ -0,0 +1,16 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<instance_usage_audit_log>
+ <total_errors>0</total_errors>
+ <total_instances>0</total_instances>
+ <log/>
+ <num_hosts_running>0</num_hosts_running>
+ <num_hosts_done>0</num_hosts_done>
+ <num_hosts_not_run>1</num_hosts_not_run>
+ <hosts_not_run>
+ <item>%(hostid)s</item>
+ </hosts_not_run>
+ <overall_status>0 of 1 hosts done. 0 errors.</overall_status>
+ <period_ending>%(timestamp)s</period_ending>
+ <period_beginning>%(timestamp)s</period_beginning>
+ <num_hosts>1</num_hosts>
+</instance_usage_audit_log>
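The overall_status string in both audit-log samples looks derived from the surrounding counters. The format below is inferred from the sample values, not taken from Nova's source:

def overall_status(num_hosts_done, num_hosts, total_errors):
    return '%s of %s hosts done. %s errors.' % (num_hosts_done, num_hosts,
                                                total_errors)

assert overall_status(0, 1, 0) == '0 of 1 hosts done. 0 errors.'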
diff --git a/nova/tests/integrated/api_samples/os-networks/network-add-req.json.tpl b/nova/tests/integrated/api_samples/os-networks/network-add-req.json.tpl
new file mode 100644
index 000000000..6489f6e1b
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/network-add-req.json.tpl
@@ -0,0 +1 @@
+{"id": "1"}
diff --git a/nova/tests/integrated/api_samples/os-networks/network-add-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks/network-add-req.xml.tpl
new file mode 100644
index 000000000..9e5822a9e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/network-add-req.xml.tpl
@@ -0,0 +1 @@
+<id>1</id>
diff --git a/nova/tests/integrated/api_samples/os-networks/network-create-req.json.tpl b/nova/tests/integrated/api_samples/os-networks/network-create-req.json.tpl
new file mode 100644
index 000000000..5e2be031c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/network-create-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "network": {
+ "label": "new net 111",
+ "cidr": "10.20.105.0/24"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-networks/network-create-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks/network-create-req.xml.tpl
new file mode 100644
index 000000000..d5222f9e8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/network-create-req.xml.tpl
@@ -0,0 +1,4 @@
+<network>
+ <label>new net 111</label>
+ <cidr>10.20.105.0/24</cidr>
+</network>
diff --git a/nova/tests/integrated/api_samples/os-networks/network-create-resp.json.tpl b/nova/tests/integrated/api_samples/os-networks/network-create-resp.json.tpl
new file mode 100644
index 000000000..e178ab50c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/network-create-resp.json.tpl
@@ -0,0 +1,32 @@
+{
+ "network": {
+ "bridge": null,
+ "vpn_public_port": null,
+ "dhcp_start": "%(ip)s",
+ "bridge_interface": null,
+ "updated_at": null,
+ "id": "%(id)s",
+ "cidr_v6": null,
+ "deleted_at": null,
+ "gateway": "%(ip)s",
+ "rxtx_base": null,
+ "label": "new net 111",
+ "priority": null,
+ "project_id": null,
+ "vpn_private_address": null,
+ "deleted": null,
+ "vlan": null,
+ "broadcast": "%(ip)s",
+ "netmask": "%(ip)s",
+ "injected": null,
+ "cidr": "10.20.105.0/24",
+ "vpn_public_address": null,
+ "multi_host": null,
+ "dns2": null,
+ "created_at": null,
+ "host": null,
+ "gateway_v6": null,
+ "netmask_v6": null,
+ "dns1": null
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-networks/network-create-resp.xml.tpl b/nova/tests/integrated/api_samples/os-networks/network-create-resp.xml.tpl
new file mode 100644
index 000000000..d709952cd
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/network-create-resp.xml.tpl
@@ -0,0 +1,30 @@
+<network>
+ <bridge>None</bridge>
+ <vpn_public_port>None</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>None</bridge_interface>
+ <updated_at>None</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>new net 111</label>
+ <priority>None</priority>
+ <project_id>None</project_id>
+ <vpn_private_address>None</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>None</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>None</injected>
+ <cidr>10.20.105.0/24</cidr>
+ <vpn_public_address>None</vpn_public_address>
+ <multi_host>None</multi_host>
+ <dns2>None</dns2>
+ <created_at>None</created_at>
+ <host>None</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+</network>
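Note the mismatch between the two create-resp templates: the JSON one carries real null values while the XML one carries the literal strings "None" and "False" (they even disagree on deleted: null versus False). That is the signature of an XML serializer that str()s each Python value. A minimal illustration, not Nova's real serializer:

import json
from xml.etree import ElementTree as ET

data = {'bridge': None, 'deleted': False, 'cidr': '10.20.105.0/24'}
root = ET.Element('network')
for key in sorted(data):
    ET.SubElement(root, key).text = str(data[key])
print(json.dumps(data))   # {"bridge": null, "cidr": ..., "deleted": false}
print(ET.tostring(root))  # <network><bridge>None</bridge>...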
diff --git a/nova/tests/integrated/api_samples/os-networks/network-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-networks/network-show-resp.json.tpl
new file mode 100644
index 000000000..5c3630c5d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/network-show-resp.json.tpl
@@ -0,0 +1,33 @@
+{
+ "network":
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(timestamp)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(timestamp)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-networks/network-show-resp.xml.tpl b/nova/tests/integrated/api_samples/os-networks/network-show-resp.xml.tpl
new file mode 100644
index 000000000..7989f47e4
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/network-show-resp.xml.tpl
@@ -0,0 +1,31 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<network>
+ <bridge>br100</bridge>
+ <vpn_public_port>1000</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>%(timestamp)s</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_0</label>
+ <priority>None</priority>
+ <project_id>1234</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>100</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.0/29</cidr>
+ <vpn_public_address>%(ip)s</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(timestamp)s</created_at>
+ <host>nsokolov-desktop</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+</network>
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl b/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl
new file mode 100644
index 000000000..df99b889c
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl
@@ -0,0 +1 @@
+{"disassociate": null}
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl b/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl
new file mode 100644
index 000000000..63c030090
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl
@@ -0,0 +1 @@
+<disassociate>None</disassociate>
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-networks/networks-list-resp.json.tpl
new file mode 100644
index 000000000..6da3fb2e2
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/networks-list-resp.json.tpl
@@ -0,0 +1,64 @@
+{
+ "networks": [
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(timestamp)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(timestamp)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000
+ },
+ {
+ "bridge": "br101",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.10/29",
+ "cidr_v6": null,
+ "created_at": "%(timestamp)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": null,
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_1",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "updated_at": null,
+ "vlan": 101,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": null,
+ "vpn_public_port": 1001
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-networks/networks-list-resp.xml.tpl
new file mode 100644
index 000000000..c01c08337
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/networks-list-resp.xml.tpl
@@ -0,0 +1,63 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<networks>
+ <network>
+ <bridge>br100</bridge>
+ <vpn_public_port>1000</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>%(timestamp)s</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_0</label>
+ <priority>None</priority>
+ <project_id>1234</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>100</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.0/29</cidr>
+ <vpn_public_address>%(ip)s</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(timestamp)s</created_at>
+ <host>nsokolov-desktop</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ </network>
+ <network>
+ <bridge>br101</bridge>
+ <vpn_public_port>1001</vpn_public_port>
+ <dhcp_start>%(ip)s</dhcp_start>
+ <bridge_interface>eth0</bridge_interface>
+ <updated_at>None</updated_at>
+ <id>%(id)s</id>
+ <cidr_v6>None</cidr_v6>
+ <deleted_at>None</deleted_at>
+ <gateway>%(ip)s</gateway>
+ <rxtx_base>None</rxtx_base>
+ <label>mynet_1</label>
+ <priority>None</priority>
+ <project_id>None</project_id>
+ <vpn_private_address>%(ip)s</vpn_private_address>
+ <deleted>False</deleted>
+ <vlan>101</vlan>
+ <broadcast>%(ip)s</broadcast>
+ <netmask>%(ip)s</netmask>
+ <injected>False</injected>
+ <cidr>10.0.0.10/29</cidr>
+ <vpn_public_address>None</vpn_public_address>
+ <multi_host>False</multi_host>
+ <dns2>None</dns2>
+ <created_at>%(timestamp)s</created_at>
+ <host>None</host>
+ <gateway_v6>None</gateway_v6>
+ <netmask_v6>None</netmask_v6>
+ <dns1>None</dns1>
+ </network>
+</networks>
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index f17dc025f..90e9a806e 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -58,7 +58,7 @@ def generate_new_element(items, prefix, numeric=False):
candidate = prefix + generate_random_numeric(8)
else:
candidate = prefix + generate_random_alphanumeric(8)
- if not candidate in items:
+ if candidate not in items:
return candidate
LOG.debug("Random collision on %s" % candidate)
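The one-line change above is purely stylistic: `not x in y` and `x not in y` compile to the same membership test, but PEP 8 prefers the latter because it reads as a single operator.

items = set(['a', 'b'])
assert (not 'c' in items) == ('c' not in items)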
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 4cadbf9e5..02fbdd48a 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -27,6 +27,7 @@ from lxml import etree
from nova.api.metadata import password
from nova.api.openstack.compute.contrib import coverage_ext
+from nova.api.openstack.compute.contrib import fping
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.cloudpipe import pipelib
from nova import context
@@ -34,7 +35,6 @@ from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.network import api as network_api
-from nova.network import manager as network_manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
@@ -42,11 +42,15 @@ from nova.openstack.common.log import logging
from nova.openstack.common import timeutils
import nova.quota
from nova.scheduler import driver
+from nova.servicegroup import api as service_group_api
from nova import test
+from nova.tests.api.openstack.compute.contrib import test_fping
+from nova.tests.api.openstack.compute.contrib import test_networks
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests import fake_network
from nova.tests.image import fake
from nova.tests.integrated import integrated_helpers
+from nova import utils
CONF = cfg.CONF
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
@@ -292,7 +296,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
# shouldn't be an issue for this case.
'timestamp': '\d{4}-[0,1]\d-[0-3]\d[ ,T]'
'\d{2}:\d{2}:\d{2}'
- '(Z|(\+|-)\d{2}:\d{2}|\.\d{6})',
+ '(Z|(\+|-)\d{2}:\d{2}|\.\d{6}|)',
'password': '[0-9a-zA-Z]{1,12}',
'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
@@ -373,16 +377,10 @@ class ApiSamplesTrap(ApiSampleTestBase):
# NOT be allowed to grow, and should shrink to zero (and be
# removed) soon.
do_not_approve_additions = []
- do_not_approve_additions.append('NMN')
do_not_approve_additions.append('os-config-drive')
do_not_approve_additions.append('os-create-server-ext')
do_not_approve_additions.append('os-flavor-access')
- do_not_approve_additions.append('os-flavor-extra-specs')
- do_not_approve_additions.append('os-floating-ip-dns')
- do_not_approve_additions.append('os-fping')
do_not_approve_additions.append('os-hypervisors')
- do_not_approve_additions.append('os-instance_usage_audit_log')
- do_not_approve_additions.append('os-networks')
do_not_approve_additions.append('os-services')
do_not_approve_additions.append('os-volumes')
@@ -1176,7 +1174,7 @@ class ExtendedServerAttributesJsonTest(ServersSampleBase):
".extended_server_attributes" + \
".Extended_server_attributes"
- def test_extended_server_attrs_get(self):
+ def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
@@ -1185,10 +1183,10 @@ class ExtendedServerAttributesJsonTest(ServersSampleBase):
subs['id'] = uuid
subs['instance_name'] = 'instance-\d{8}'
subs['hypervisor_hostname'] = r'[\w\.\-]+'
- return self._verify_response('extended-server-attrs-get',
+ return self._verify_response('server-get-resp',
subs, response)
- def test_extended_server_attrs_list(self):
+ def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
@@ -1197,7 +1195,7 @@ class ExtendedServerAttributesJsonTest(ServersSampleBase):
subs['id'] = uuid
subs['instance_name'] = 'instance-\d{8}'
subs['hypervisor_hostname'] = r'[\w\.\-]+'
- return self._verify_response('extended-server-attrs-list',
+ return self._verify_response('servers-detail-resp',
subs, response)
@@ -1512,7 +1510,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
'vpn_public_port': 22}
self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
- self.stubs.Set(network_manager.NetworkManager, "get_network",
+ self.stubs.Set(network_api.API, "get",
network_api_get)
def generalize_subs(self, subs, vanilla_regexes):
@@ -2221,15 +2219,15 @@ class QuotasSampleXmlTests(QuotasSampleJsonTests):
class ExtendedStatusSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_status.Extended_status")
+ ".extended_status.Extended_status")
def test_show(self):
uuid = self._post_server()
- response = self._do_get('servers')
+ response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status, 200)
subs = self._get_regexes()
- subs['id'] = uuid
- return self._verify_response('servers-list-resp', subs, response)
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('server-get-resp', subs, response)
def test_detail(self):
uuid = self._post_server()
@@ -2424,6 +2422,62 @@ class OsNetworksJsonTests(ApiSampleTestBase):
self.assertEqual(response.status, 202)
+class NetworksJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".os_networks.Os_networks")
+
+ def setUp(self):
+ super(NetworksJsonTests, self).setUp()
+ fake_network_api = test_networks.FakeNetworkAPI()
+ self.stubs.Set(network_api.API, "get_all",
+ fake_network_api.get_all)
+ self.stubs.Set(network_api.API, "get",
+ fake_network_api.get)
+ self.stubs.Set(network_api.API, "associate",
+ fake_network_api.associate)
+ self.stubs.Set(network_api.API, "delete",
+ fake_network_api.delete)
+ self.stubs.Set(network_api.API, "create",
+ fake_network_api.create)
+ self.stubs.Set(network_api.API, "add_network_to_project",
+ fake_network_api.add_network_to_project)
+
+ def test_network_list(self):
+ response = self._do_get('os-networks')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ self._verify_response('networks-list-resp', subs, response)
+
+ def test_network_disassociate(self):
+ uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+ response = self._do_post('os-networks/%s/action' % uuid,
+ 'networks-disassociate-req', {})
+ self.assertEqual(response.status, 202)
+
+ def test_network_show(self):
+ uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+ response = self._do_get('os-networks/%s' % uuid)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ self._verify_response('network-show-resp', subs, response)
+
+ def test_network_create(self):
+ response = self._do_post("os-networks",
+ 'network-create-req', {})
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ self._verify_response('network-create-resp', subs, response)
+
+ def test_network_add(self):
+ response = self._do_post("os-networks/add",
+ 'network-add-req', {})
+ self.assertEqual(response.status, 202)
+
+
+class NetworksXmlTests(NetworksJsonTests):
+ ctype = 'xml'
+
+
class NetworksAssociateJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib"
".networks_associate.Networks_associate")
@@ -2687,4 +2741,285 @@ class FloatingIPPoolsSampleJsonTests(ApiSampleTestBase):
class FloatingIPPoolsSampleXmlTests(FloatingIPPoolsSampleJsonTests):
+ ctype = 'xml'
+
+
+class MultinicSampleJsonTest(ServersSampleBase):
+ extension_name = "nova.api.openstack.compute.contrib.multinic.Multinic"
+
+ def setUp(self):
+ super(MultinicSampleJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_add_fixed_ip(self):
+ subs = {"networkId": 1}
+ response = self._do_post('servers/%s/action' % (self.uuid),
+ 'multinic-add-fixed-ip-req', subs)
+ self.assertEqual(response.status, 202)
+
+ def test_remove_fixed_ip(self):
+ subs = {"ip": "10.0.0.2"}
+ response = self._do_post('servers/%s/action' % (self.uuid),
+ 'multinic-remove-fixed-ip-req', subs)
+ self.assertEqual(response.status, 202)
+
+
+class MultinicSampleXmlTest(MultinicSampleJsonTest):
ctype = "xml"
+
+
+class InstanceUsageAuditLogJsonTest(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "instance_usage_audit_log.Instance_usage_audit_log")
+
+ def test_show_instance_usage_audit_log(self):
+ response = self._do_get('os-instance_usage_audit_log/%s' %
+ urllib.quote('2012-07-05 10:00:00'))
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('inst-usage-audit-log-show-get-resp',
+ subs, response)
+
+ def test_index_instance_usage_audit_log(self):
+ response = self._do_get('os-instance_usage_audit_log')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('inst-usage-audit-log-index-get-resp',
+ subs, response)
+
+
+class InstanceUsageAuditLogXmlTest(InstanceUsageAuditLogJsonTest):
+ ctype = "xml"
+
+
+class FlavorExtraSpecsSampleJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib.flavorextraspecs."
+ "Flavorextraspecs")
+
+ def _flavor_extra_specs_create(self):
+ subs = {'value1': 'value1',
+ 'value2': 'value2'
+ }
+ response = self._do_post('flavors/1/os-extra_specs',
+ 'flavor-extra-specs-create-req', subs)
+ self.assertEqual(response.status, 200)
+ return self._verify_response('flavor-extra-specs-create-resp',
+ subs, response)
+
+ def test_flavor_extra_specs_get(self):
+ subs = {'value1': 'value1'}
+ self._flavor_extra_specs_create()
+ response = self._do_get('flavors/1/os-extra_specs/key1')
+ self.assertEqual(response.status, 200)
+ return self._verify_response('flavor-extra-specs-get-resp',
+ subs, response)
+
+ def test_flavor_extra_specs_list(self):
+ subs = {'value1': 'value1',
+ 'value2': 'value2'
+ }
+ self._flavor_extra_specs_create()
+ response = self._do_get('flavors/1/os-extra_specs')
+ self.assertEqual(response.status, 200)
+ return self._verify_response('flavor-extra-specs-list-resp',
+ subs, response)
+
+ def test_flavor_extra_specs_create(self):
+ return self._flavor_extra_specs_create()
+
+ def test_flavor_extra_specs_update(self):
+ subs = {'value1': 'new_value1'}
+ self._flavor_extra_specs_create()
+ response = self._do_put('flavors/1/os-extra_specs/key1',
+ 'flavor-extra-specs-update-req', subs)
+ self.assertEqual(response.status, 200)
+ return self._verify_response('flavor-extra-specs-update-resp',
+ subs, response)
+
+ def test_flavor_extra_specs_delete(self):
+ self._flavor_extra_specs_create()
+ response = self._do_delete('flavors/1/os-extra_specs/key1')
+ self.assertEqual(response.status, 200)
+ self.assertEqual(response.read(), '')
+
+
+class FlavorExtraSpecsSampleXmlTests(FlavorExtraSpecsSampleJsonTests):
+ ctype = 'xml'
+
+
+class FpingSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.fping.Fping")
+
+ def setUp(self):
+ super(FpingSampleJsonTests, self).setUp()
+
+ def fake_check_fping(self):
+ pass
+ self.stubs.Set(utils, "execute", test_fping.execute)
+ self.stubs.Set(fping.FpingController, "check_fping",
+ fake_check_fping)
+
+ def test_get_fping(self):
+ self._post_server()
+ response = self._do_get('os-fping')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('fping-get-resp', subs, response)
+
+ def test_get_fping_details(self):
+ uuid = self._post_server()
+ response = self._do_get('os-fping/%s' % (uuid))
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('fping-get-details-resp', subs, response)
+
+
+class FpingSampleXmlTests(FpingSampleJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_availability_zone"
+ ".Extended_availability_zone")
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('server-get-resp', subs, response)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ return self._verify_response('servers-detail-resp', subs, response)
+
+
+class ExtendedAvailabilityZoneXmlTests(ExtendedAvailabilityZoneJsonTests):
+ ctype = 'xml'
+
+
+class EvacuateJsonTest(ServersSampleBase):
+
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".evacuate.Evacuate")
+
+ def test_server_evacuate(self):
+ uuid = self._post_server()
+
+ req_subs = {
+ 'host': 'TargetHost',
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+
+ def fake_service_is_up(self, service):
+ """Simulate validation of instance host is down."""
+ return False
+
+ self.stubs.Set(service_group_api.API, 'service_is_up',
+ fake_service_is_up)

+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-evacuate-req', req_subs)
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('server-evacuate-resp', subs,
+ response)
+
+
+class EvacuateXmlTest(EvacuateJsonTest):
+ ctype = 'xml'
+
+
+class FloatingIpDNSJsonTest(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib.floating_ip_dns."
+ "Floating_ip_dns")
+
+ domain = 'domain1.example.org'
+ name = 'instance1'
+ scope = 'public'
+ project = 'project1'
+ dns_type = 'A'
+ ip = '192.168.1.1'
+
+ def _create_or_update(self):
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ response = self._do_put('os-floating-ip-dns/%s' % self.domain,
+ 'floating-ip-dns-create-or-update-req', subs)
+ self.assertEqual(response.status, 200)
+ self._verify_response('floating-ip-dns-create-or-update-resp', subs,
+ response)
+
+ def _create_or_update_entry(self):
+ subs = {'ip': self.ip, 'dns_type': self.dns_type}
+ response = self._do_put('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name),
+ 'floating-ip-dns-create-or-update-entry-req',
+ subs)
+ self.assertEqual(response.status, 200)
+ subs.update({'name': self.name, 'domain': self.domain})
+ self._verify_response('floating-ip-dns-create-or-update-entry-resp',
+ subs, response)
+
+ def test_floating_ip_dns_list(self):
+ self._create_or_update()
+ response = self._do_get('os-floating-ip-dns')
+ self.assertEqual(response.status, 200)
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ return self._verify_response('floating-ip-dns-list-resp', subs,
+ response)
+
+ def test_floating_ip_dns_create_or_update(self):
+ self._create_or_update()
+
+ def test_floating_ip_dns_delete(self):
+ self._create_or_update()
+ response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
+ self.assertEqual(response.status, 202)
+
+ def test_floating_ip_dns_create_or_update_entry(self):
+ self._create_or_update_entry()
+
+ def test_floating_ip_dns_entry_get(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ self.assertEqual(response.status, 200)
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ return self._verify_response('floating-ip-dns-entry-get-resp', subs,
+ response)
+
+ def test_floating_ip_dns_entry_delete(self):
+ self._create_or_update_entry()
+ response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ self.assertEqual(response.status, 202)
+
+ def test_floating_ip_dns_entry_list(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.ip))
+ self.assertEqual(response.status, 200)
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ return self._verify_response('floating-ip-dns-entry-list-resp', subs,
+ response)
+
+
+class FloatingIpDNSXmlTest(FloatingIpDNSJsonTest):
+ ctype = 'xml'
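All of the new sample tests above lean on self.stubs.Set from mox's stubout machinery: swap an attribute for a fake, restore it at teardown. A simplified reimplementation of the pattern (not mox's actual code):

class StubOutForTesting(object):
    def __init__(self):
        self._saved = []

    def Set(self, obj, attr_name, new_attr):
        # Remember the original so it can be restored later.
        self._saved.append((obj, attr_name, getattr(obj, attr_name)))
        setattr(obj, attr_name, new_attr)

    def UnsetAll(self):
        # Restore in reverse order so nested stubs unwind cleanly.
        for obj, attr_name, old_attr in reversed(self._saved):
            setattr(obj, attr_name, old_attr)

class Thing(object):
    value = 1

stubs = StubOutForTesting()
stubs.Set(Thing, 'value', 2)
assert Thing.value == 2
stubs.UnsetAll()
assert Thing.value == 1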
diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py
index b2361b13c..ae4fcc32f 100644
--- a/nova/tests/integrated/test_multiprocess_api.py
+++ b/nova/tests/integrated/test_multiprocess_api.py
@@ -63,7 +63,7 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
try:
traceback.print_exc()
except BaseException:
- print "Couldn't print traceback"
+ LOG.error("Couldn't print traceback")
status = 2
# Really exit
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 0756775dd..b70db93f2 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -16,6 +16,7 @@
# under the License.
import time
+import zlib
from nova.openstack.common.log import logging
from nova.tests import fake_network
@@ -437,3 +438,43 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
for server_id in server_map.iterkeys():
self._delete_server(server_id)
+
+ def test_create_server_with_injected_files(self):
+ # Creates a server with injected_files.
+ fake_network.set_stub_network_methods(self.stubs)
+ personality = []
+
+ # Inject a text file
+ data = 'Hello, World!'
+ personality.append({
+ 'path': '/helloworld.txt',
+ 'contents': data.encode('base64'),
+ })
+
+ # Inject a binary file
+ data = zlib.compress('Hello, World!')
+ personality.append({
+ 'path': '/helloworld.zip',
+ 'contents': data.encode('base64'),
+ })
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+ server['personality'] = personality
+
+ post = {'server': server}
+
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Check it's there
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cleanup
+ self._delete_server(created_server_id)
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index a0179ff94..01c727c17 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -26,8 +26,8 @@ from nova import context
from nova import exception
from nova import network
from nova.network import api
+from nova.network import floating_ips
from nova.network import rpcapi as network_rpcapi
-from nova.openstack.common import rpc
from nova import policy
from nova import test
@@ -75,7 +75,7 @@ class ApiTestCase(test.TestCase):
self.mox.StubOutWithMock(
self.network_api.network_rpcapi, "allocate_for_instance")
kwargs = dict(zip(['host', 'instance_id', 'instance_uuid',
- 'project_id', 'requested_networks', 'rxtx_factor', 'vpn'],
+ 'project_id', 'requested_networks', 'rxtx_factor', 'vpn', 'macs'],
itertools.repeat(mox.IgnoreArg())))
self.network_api.network_rpcapi.allocate_for_instance(
mox.IgnoreArg(), **kwargs).AndReturn([])
@@ -90,10 +90,11 @@ class ApiTestCase(test.TestCase):
new_instance = {'uuid': 'new-uuid'}
- def fake_rpc_call(context, topic, msg, timeout=None):
+ def fake_associate(*args, **kwargs):
return orig_instance_uuid
- self.stubs.Set(rpc, 'call', fake_rpc_call)
+ self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
+ fake_associate)
def fake_instance_get_by_uuid(context, instance_uuid):
return {'uuid': instance_uuid}
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index 8a7865b83..bc21b80ad 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import calendar
import os
import mox
@@ -25,6 +26,7 @@ from nova.network import driver
from nova.network import linux_net
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
from nova import test
from nova import utils
@@ -107,6 +109,7 @@ fixed_ips = [{'id': 0,
'address': '192.168.0.100',
'instance_id': 0,
'allocated': True,
+ 'leased': True,
'virtual_interface_id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
@@ -115,6 +118,7 @@ fixed_ips = [{'id': 0,
'address': '192.168.1.100',
'instance_id': 0,
'allocated': True,
+ 'leased': True,
'virtual_interface_id': 1,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
@@ -123,6 +127,7 @@ fixed_ips = [{'id': 0,
'address': '192.168.0.101',
'instance_id': 1,
'allocated': True,
+ 'leased': True,
'virtual_interface_id': 2,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []},
@@ -131,6 +136,7 @@ fixed_ips = [{'id': 0,
'address': '192.168.1.101',
'instance_id': 1,
'allocated': True,
+ 'leased': True,
'virtual_interface_id': 3,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []},
@@ -139,6 +145,7 @@ fixed_ips = [{'id': 0,
'address': '192.168.0.102',
'instance_id': 0,
'allocated': True,
+ 'leased': False,
'virtual_interface_id': 4,
'instance_uuid': '00000000-0000-0000-0000-0000000000000000',
'floating_ips': []},
@@ -147,6 +154,7 @@ fixed_ips = [{'id': 0,
'address': '192.168.1.102',
'instance_id': 1,
'allocated': True,
+ 'leased': False,
'virtual_interface_id': 5,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001',
'floating_ips': []}]
@@ -184,7 +192,7 @@ vifs = [{'id': 0,
'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}]
-def get_associated(context, network_id, host=None):
+def get_associated(context, network_id, host=None, address=None):
result = []
for datum in fixed_ips:
if (datum['network_id'] == network_id and datum['allocated']
@@ -193,6 +201,8 @@ def get_associated(context, network_id, host=None):
instance = instances[datum['instance_uuid']]
if host and host != instance['host']:
continue
+ if address and address != datum['address']:
+ continue
cleaned = {}
cleaned['address'] = datum['address']
cleaned['instance_uuid'] = datum['instance_uuid']
@@ -203,6 +213,8 @@ def get_associated(context, network_id, host=None):
cleaned['instance_hostname'] = instance['hostname']
cleaned['instance_updated'] = instance['updated_at']
cleaned['instance_created'] = instance['created_at']
+ cleaned['allocated'] = datum['allocated']
+ cleaned['leased'] = datum['leased']
result.append(cleaned)
return result
@@ -299,7 +311,6 @@ class LinuxNetworkTestCase(test.TestCase):
"192.168.1.102,net:NW-5"
)
actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1])
-
self.assertEquals(actual_hosts, expected)
def test_get_dns_hosts_for_nw00(self):
@@ -333,6 +344,41 @@ class LinuxNetworkTestCase(test.TestCase):
self.assertEquals(actual_opts, expected_opts)
+ def test_get_dhcp_leases_for_nw00(self):
+ timestamp = timeutils.utcnow()
+ seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
+
+ leases = self.driver.get_dhcp_leases(self.context, networks[0])
+ leases = leases.split('\n')
+ for lease in leases:
+ lease = lease.split(' ')
+ data = get_associated(self.context, 0, address=lease[2])[0]
+ self.assertTrue(data['allocated'])
+ self.assertTrue(data['leased'])
+ self.assertTrue(lease[0] > seconds_since_epoch)
+ self.assertTrue(lease[1] == data['vif_address'])
+ self.assertTrue(lease[2] == data['address'])
+ self.assertTrue(lease[3] == data['instance_hostname'])
+ self.assertTrue(lease[4] == '*')
+
+ def test_get_dhcp_leases_for_nw01(self):
+ self.flags(host='fake_instance01')
+ timestamp = timeutils.utcnow()
+ seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
+
+ leases = self.driver.get_dhcp_leases(self.context, networks[1])
+ leases = leases.split('\n')
+ for lease in leases:
+ lease = lease.split(' ')
+ data = get_associated(self.context, 1, address=lease[2])[0]
+ self.assertTrue(data['allocated'])
+ self.assertTrue(data['leased'])
+ self.assertTrue(lease[0] > seconds_since_epoch)
+ self.assertTrue(lease[1] == data['vif_address'])
+ self.assertTrue(lease[2] == data['address'])
+ self.assertTrue(lease[3] == data['instance_hostname'])
+ self.assertTrue(lease[4] == '*')
+
def test_dhcp_opts_not_default_gateway_network(self):
expected = "NW-0,3"
data = get_associated(self.context, 0)[0]
@@ -461,14 +507,14 @@ class LinuxNetworkTestCase(test.TestCase):
'bridge_interface': iface}
driver.plug(network, 'fakemac')
expected = [
- ('ebtables', '-D', 'INPUT', '-p', 'ARP', '-i', iface,
- '--arp-ip-dst', dhcp, '-j', 'DROP'),
- ('ebtables', '-I', 'INPUT', '-p', 'ARP', '-i', iface,
- '--arp-ip-dst', dhcp, '-j', 'DROP'),
- ('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface,
- '--arp-ip-src', dhcp, '-j', 'DROP'),
- ('ebtables', '-I', 'OUTPUT', '-p', 'ARP', '-o', iface,
- '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
+ iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-I', 'INPUT', '-p', 'ARP', '-i',
+ iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
+ iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-I', 'OUTPUT', '-p', 'ARP', '-o',
+ iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('iptables-save', '-c'),
('iptables-restore', '-c'),
('ip6tables-save', '-c'),
@@ -500,10 +546,10 @@ class LinuxNetworkTestCase(test.TestCase):
driver.unplug(network)
expected = [
- ('ebtables', '-D', 'INPUT', '-p', 'ARP', '-i', iface,
- '--arp-ip-dst', dhcp, '-j', 'DROP'),
- ('ebtables', '-D', 'OUTPUT', '-p', 'ARP', '-o', iface,
- '--arp-ip-src', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'INPUT', '-p', 'ARP', '-i',
+ iface, '--arp-ip-dst', dhcp, '-j', 'DROP'),
+ ('ebtables', '-t', 'filter', '-D', 'OUTPUT', '-p', 'ARP', '-o',
+ iface, '--arp-ip-src', dhcp, '-j', 'DROP'),
('iptables-save', '-c'),
('iptables-restore', '-c'),
('ip6tables-save', '-c'),
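From the assertions in the two new lease tests above, each lease line has the dnsmasq-style shape "<expiry> <mac> <ip> <hostname> *". A sketch of emitting one such line (the 120-second lifetime is an arbitrary assumption):

import calendar
import time

def lease_line(vif_address, address, hostname, lifetime=120):
    expiry = calendar.timegm(time.gmtime()) + lifetime
    return '%d %s %s %s *' % (expiry, vif_address, address, hostname)

print(lease_line('02:16:3e:33:44:55', '192.168.0.100', 'test'))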
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 94f297fe9..3728dd2e0 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -15,7 +15,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import shutil
import fixtures
import mox
@@ -25,10 +24,12 @@ from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
+from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
@@ -185,6 +186,8 @@ class FlatNetworkTestCase(test.TestCase):
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
+ 'qbh_params': None,
+ 'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False}
self.assertThat(info, matchers.DictMatches(check))
@@ -669,7 +672,7 @@ class VlanNetworkTestCase(test.TestCase):
is_admin=False)
def fake1(*args, **kwargs):
- return '10.0.0.1'
+ return {'address': '10.0.0.1', 'network': 'fakenet'}
# floating ip that's already associated
def fake2(*args, **kwargs):
@@ -725,7 +728,7 @@ class VlanNetworkTestCase(test.TestCase):
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
- self.stubs.Set(self.network.driver, 'bind_floating_ip', fake8)
+ self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
@@ -775,6 +778,42 @@ class VlanNetworkTestCase(test.TestCase):
mox.IgnoreArg())
self.assertTrue(self.local)
+ def test_add_floating_ip_nat_before_bind(self):
+        # Tried to verify the ordering with mox's documented record/verify
+        # functionality, but that check never seems to fail, so it proves
+        # nothing. Stubs and a flag are used instead; if the mox feature
+        # can be made to work, it would be a better way to test this.
+ #
+ # self.mox.StubOutWithMock(self.network.driver,
+ # 'ensure_floating_forward')
+ # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
+ #
+ # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
+ # mox.IgnoreArg(),
+ # mox.IgnoreArg(),
+ # mox.IgnoreArg())
+ # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
+ # mox.IgnoreArg())
+ # self.mox.ReplayAll()
+
+ nat_called = [False]
+
+ def fake_nat(*args, **kwargs):
+ nat_called[0] = True
+
+ def fake_bind(*args, **kwargs):
+ self.assertTrue(nat_called[0])
+
+ self.stubs.Set(self.network.driver,
+ 'ensure_floating_forward',
+ fake_nat)
+ self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
+
+ self.network.l3driver.add_floating_ip('fakefloat',
+ 'fakefixed',
+ 'fakeiface',
+ 'fakenet')
+
def test_floating_ip_init_host(self):
def get_all_by_host(_context, _host):
@@ -789,9 +828,9 @@ class VlanNetworkTestCase(test.TestCase):
self.stubs.Set(self.network.db, 'floating_ip_get_all_by_host',
get_all_by_host)
- def fixed_ip_get(_context, fixed_ip_id):
+ def fixed_ip_get(_context, fixed_ip_id, get_network):
if fixed_ip_id == 1:
- return {'address': 'fakefixed'}
+ return {'address': 'fakefixed', 'network': 'fakenet'}
raise exception.FixedIpNotFound(id=fixed_ip_id)
self.stubs.Set(self.network.db, 'fixed_ip_get', fixed_ip_get)
@@ -799,7 +838,8 @@ class VlanNetworkTestCase(test.TestCase):
self.flags(public_interface=False)
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
- 'fakeiface')
+ 'fakeiface',
+ 'fakenet')
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
@@ -809,7 +849,8 @@ class VlanNetworkTestCase(test.TestCase):
self.flags(public_interface='fooiface')
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
- 'fooiface')
+ 'fooiface',
+ 'fakenet')
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
@@ -1576,15 +1617,14 @@ class BackdoorPortTestCase(test.TestCase):
self.assertEqual(port, self.manager.backdoor_port)
-class TestFloatingIPManager(network_manager.FloatingIP,
+class TestFloatingIPManager(floating_ips.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
- def test_allocate_for_instance(self):
- address = "10.10.10.10"
- self.flags(auto_assign_floating_ip=True)
+ def setUp(self):
+ super(AllocateTestCase, self).setUp()
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
@@ -1596,6 +1636,10 @@ class AllocateTestCase(test.TestCase):
self.project_id,
is_admin=True)
+ def test_allocate_for_instance(self):
+ address = "10.10.10.10"
+ self.flags(auto_assign_floating_ip=True)
+
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
@@ -1610,7 +1654,7 @@ class AllocateTestCase(test.TestCase):
nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
- project_id=project_id)
+ project_id=project_id, macs=None)
self.assertEquals(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(utils.is_valid_ipv4(fixed_ip))
@@ -1620,6 +1664,44 @@ class AllocateTestCase(test.TestCase):
host=self.network.host,
project_id=project_id)
+ def test_allocate_for_instance_with_mac(self):
+ available_macs = set(['ca:fe:de:ad:be:ef'])
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ nw_info = self.network.allocate_for_instance(self.context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=project_id, macs=available_macs)
+ assigned_macs = [vif['address'] for vif in nw_info]
+ self.assertEquals(1, len(assigned_macs))
+ self.assertEquals(available_macs.pop(), assigned_macs[0])
+ self.network.deallocate_for_instance(self.context,
+ instance_id=inst['id'],
+ host=self.network.host,
+ project_id=project_id)
+
+ def test_allocate_for_instance_not_enough_macs(self):
+ available_macs = set()
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self.network.allocate_for_instance, self.context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=project_id, macs=available_macs)
+
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
@@ -1666,8 +1748,8 @@ class FloatingIPTestCase(test.TestCase):
'fixed_ip_get',
lambda _x, _y: fixed_ip)
- self.stubs.Set(self.network,
- '_get_network_by_id',
+ self.stubs.Set(self.network.db,
+ 'network_get',
lambda _x, _y: network)
self.stubs.Set(self.network.db,
@@ -1724,8 +1806,8 @@ class FloatingIPTestCase(test.TestCase):
'fixed_ip_get_by_address',
lambda _x, _y: fixed_ip)
- self.stubs.Set(self.network,
- '_get_network_by_id',
+ self.stubs.Set(self.network.db,
+ 'network_get',
lambda _x, _y: network)
self.stubs.Set(self.network.db,
@@ -1804,11 +1886,13 @@ class FloatingIPTestCase(test.TestCase):
def fake_is_stale_floating_ip_address(context, floating_ip):
return floating_ip['address'] == '172.24.4.23'
- def fake_fixed_ip_get(context, fixed_ip_id):
+ def fake_fixed_ip_get(context, fixed_ip_id, get_network):
return {'instance_uuid': 'fake_uuid',
- 'address': '10.0.0.2'}
+ 'address': '10.0.0.2',
+ 'network': 'fakenet'}
- def fake_remove_floating_ip(floating_addr, fixed_addr, interface):
+ def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
+ network):
called['count'] += 1
def fake_floating_ip_update(context, address, args):
@@ -1845,11 +1929,13 @@ class FloatingIPTestCase(test.TestCase):
def fake_is_stale_floating_ip_address(context, floating_ip):
return floating_ip['address'] == '172.24.4.23'
- def fake_fixed_ip_get(context, fixed_ip_id):
+ def fake_fixed_ip_get(context, fixed_ip_id, get_network):
return {'instance_uuid': 'fake_uuid',
- 'address': '10.0.0.2'}
+ 'address': '10.0.0.2',
+ 'network': 'fakenet'}
- def fake_add_floating_ip(floating_addr, fixed_addr, interface):
+ def fake_add_floating_ip(floating_addr, fixed_addr, interface,
+ network):
called['count'] += 1
def fake_floating_ip_update(context, address, args):
@@ -2040,12 +2126,12 @@ class FloatingIPTestCase(test.TestCase):
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
- raise exception.DBError("If you're smart, you'll retry!")
+ raise db_session.DBError("If you're smart, you'll retry!")
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
# by the retry loop
- self.network.add_virtual_interface(ctxt, 'fake_uuid', 'fake_net')
+ self.network._add_virtual_interface(ctxt, 'fake_uuid', 'fake_net')
self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
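On the nat_called flag in test_add_floating_ip_nat_before_bind above: it is a one-element list because Python 2 has no `nonlocal`, so a mutable container is the usual way for an inner fake to record state the enclosing test can assert on. A standalone version of the same ordering check:

calls = []

def fake_nat(*args, **kwargs):
    calls.append('nat')

def fake_bind(*args, **kwargs):
    assert calls == ['nat'], 'NAT must be configured before the bind'
    calls.append('bind')

fake_nat()
fake_bind()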
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index c9b2e43b3..1805044a1 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -96,7 +96,8 @@ class TestQuantumClient(test.TestCase):
client.Client.__init__(
endpoint_url=CONF.quantum_url,
token=my_context.auth_token,
- timeout=CONF.quantum_url_timeout).AndReturn(None)
+ timeout=CONF.quantum_url_timeout,
+ insecure=False).AndReturn(None)
self.mox.ReplayAll()
quantumv2.get_client(my_context)
@@ -117,7 +118,8 @@ class TestQuantumClient(test.TestCase):
client.Client.__init__(
endpoint_url=CONF.quantum_url,
auth_strategy=None,
- timeout=CONF.quantum_url_timeout).AndReturn(None)
+ timeout=CONF.quantum_url_timeout,
+ insecure=False).AndReturn(None)
self.mox.ReplayAll()
quantumv2.get_client(my_context)
@@ -424,7 +426,8 @@ class TestQuantumv2(test.TestCase):
return api
api.get_instance_nw_info(mox.IgnoreArg(),
self.instance,
- networks=nets).AndReturn(None)
+ networks=nets,
+ conductor_api=mox.IgnoreArg()).AndReturn(None)
self.mox.ReplayAll()
return api
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
index 5ba7459fb..f3a032dfe 100644
--- a/nova/tests/network/test_rpcapi.py
+++ b/nova/tests/network/test_rpcapi.py
@@ -28,6 +28,10 @@ CONF = cfg.CONF
class NetworkRpcAPITestCase(test.TestCase):
+ def setUp(self):
+ super(NetworkRpcAPITestCase, self).setUp()
+ self.flags(multi_host=True)
+
def _test_network_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = network_rpcapi.NetworkAPI()
@@ -45,13 +49,18 @@ class NetworkRpcAPITestCase(test.TestCase):
'_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
'_associate_floating_ip', '_disassociate_floating_ip',
'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start',
- 'migrate_instance_finish', 'get_backdoor_port'
+ 'migrate_instance_finish', 'get_backdoor_port',
+ 'allocate_for_instance', 'deallocate_for_instance',
]
if method in targeted_methods and 'host' in kwargs:
- if method != 'deallocate_fixed_ip':
+ if method not in ['allocate_for_instance',
+ 'deallocate_for_instance',
+ 'deallocate_fixed_ip']:
del expected_msg['args']['host']
host = kwargs['host']
- expected_topic = rpc.queue_get_for(ctxt, CONF.network_topic, host)
+ if CONF.multi_host:
+ expected_topic = rpc.queue_get_for(ctxt, CONF.network_topic,
+ host)
expected_msg['version'] = expected_version
self.fake_args = None
@@ -148,6 +157,16 @@ class NetworkRpcAPITestCase(test.TestCase):
self._test_network_api('deallocate_floating_ip', rpc_method='call',
address='addr', affect_auto_assigned=True)
+ def test_allocate_floating_ip_no_multi(self):
+ self.flags(multi_host=False)
+ self._test_network_api('allocate_floating_ip', rpc_method='call',
+ project_id='fake_id', pool='fake_pool', auto_assigned=False)
+
+ def test_deallocate_floating_ip_no_multi(self):
+ self.flags(multi_host=False)
+ self._test_network_api('deallocate_floating_ip', rpc_method='call',
+ address='addr', affect_auto_assigned=True)
+
def test_associate_floating_ip(self):
self._test_network_api('associate_floating_ip', rpc_method='call',
floating_address='blah', fixed_address='foo',
@@ -161,7 +180,8 @@ class NetworkRpcAPITestCase(test.TestCase):
self._test_network_api('allocate_for_instance', rpc_method='call',
instance_id='fake_id', instance_uuid='fake_uuid',
project_id='fake_id', host='fake_host',
- rxtx_factor='fake_factor', vpn=False, requested_networks={})
+ rxtx_factor='fake_factor', vpn=False, requested_networks={},
+ macs=set(), version="1.8")
def test_deallocate_for_instance(self):
self._test_network_api('deallocate_for_instance', rpc_method='call',
diff --git a/nova/tests/scheduler/test_chance_scheduler.py b/nova/tests/scheduler/test_chance_scheduler.py
index 76fba900d..dcbe86f75 100644
--- a/nova/tests/scheduler/test_chance_scheduler.py
+++ b/nova/tests/scheduler/test_chance_scheduler.py
@@ -25,6 +25,7 @@ import mox
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
@@ -134,7 +135,8 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
{'vm_state': vm_states.ERROR,
'task_state': None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(ctxt,
- new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+ mox.IsA(conductor_api.LocalAPI), new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
self.driver.schedule_run_instance(
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 2bd2cb85b..ffc228786 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -19,15 +19,19 @@ Tests For Filter Scheduler.
import mox
from nova.compute import instance_types
+from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
+from nova.openstack.common import rpc
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import weights
+from nova import servicegroup
from nova.tests.scheduler import fakes
from nova.tests.scheduler import test_scheduler
@@ -36,6 +40,16 @@ def fake_get_filtered_hosts(hosts, filter_properties):
return list(hosts)
+def fake_get_group_filtered_hosts(hosts, filter_properties):
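+ # A fake stand-in for the host manager's filtering: when group_hosts is
+ # set, drop the first host so each instance in the group lands on a
+ # different host (emulating anti-affinity).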
+ group_hosts = filter_properties.get('group_hosts') or []
+ if group_hosts:
+ hosts = list(hosts)
+ hosts.pop(0)
+ return hosts
+ else:
+ return list(hosts)
+
+
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
@@ -62,7 +76,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
uuid, {'vm_state': vm_states.ERROR, 'task_state':
None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(fake_context,
- new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+ mox.IsA(conductor_api.LocalAPI), new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None, None, None, {})
@@ -92,7 +107,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
uuid, {'vm_state': vm_states.ERROR, 'task_state':
None}).AndReturn(({}, {}))
compute_utils.add_instance_fault_from_exc(fake_context,
- new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+ mox.IsA(conductor_api.LocalAPI), new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
sched.schedule_run_instance(
fake_context, request_spec, None, None, None, None, {})
@@ -339,3 +355,204 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual([['host', 'node']],
filter_properties['retry']['hosts'])
+
+ def test_live_migration_dest_check_service_memory_overcommit(self):
+ # Live migration should work, since the default is to allow memory overcommit.
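+ # With the default ram_allocation_ratio of 1.5, roughly
+ # 512 + 2048 * 0.5 = 1536 MB should be usable on the destination,
+ # comfortably above the instance's 1024 MB.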
+ self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+ self.mox.StubOutWithMock(self.driver, '_get_compute_info')
+ self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
+ self.mox.StubOutWithMock(rpc, 'call')
+ self.mox.StubOutWithMock(self.driver.compute_rpcapi, 'live_migration')
+
+ dest = 'fake_host2'
+ block_migration = False
+ disk_over_commit = False
+ instance = self._live_migration_instance()
+
+ self.driver._live_migration_src_check(self.context, instance)
+ db.service_get_by_compute_host(self.context,
+ dest).AndReturn('fake_service3')
+ self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
+
+ self.driver._get_compute_info(self.context, dest).AndReturn(
+ {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 512,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None})
+
+ self.driver._live_migration_common_check(self.context, instance, dest)
+
+ rpc.call(self.context, "compute.fake_host2",
+ {"method": 'check_can_live_migrate_destination',
+ "args": {'instance': instance,
+ 'block_migration': block_migration,
+ 'disk_over_commit': disk_over_commit},
+ "version": compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
+ None).AndReturn({})
+
+ self.driver.compute_rpcapi.live_migration(self.context,
+ host=instance['host'], instance=instance, dest=dest,
+ block_migration=block_migration, migrate_data={})
+
+ self.mox.ReplayAll()
+ result = self.driver.schedule_live_migration(self.context,
+ instance=instance, dest=dest,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+ self.assertEqual(result, None)
+
+ def test_live_migration_assert_memory_no_overcommit(self):
+ # Test that memory check passes with no memory overcommit.
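+ # Assumed check: usable memory is free_ram_mb plus
+ # memory_mb * (ram_allocation_ratio - 1); here that is 1024 + 0 MB,
+ # exactly the instance's 1024 MB, so the assertion should pass.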
+ def fake_get(context, host):
+ return {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 1024,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None}
+
+ self.stubs.Set(self.driver, '_get_compute_info', fake_get)
+
+ self.flags(ram_allocation_ratio=1.0)
+ instance = self._live_migration_instance()
+ dest = 'fake_host2'
+ result = self.driver._assert_compute_node_has_enough_memory(
+ self.context, instance, dest)
+ self.assertEqual(result, None)
+
+ def test_live_migration_assert_memory_no_overcommit_lack_memory(self):
+ # Test that memory check fails with no memory overcommit.
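+ # free_ram_mb=1023 leaves the destination one MB short of the
+ # instance's 1024 MB, so MigrationError is expected.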
+ def fake_get(context, host):
+ return {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 1023,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None}
+
+ self.stubs.Set(self.driver, '_get_compute_info', fake_get)
+
+ self.flags(ram_allocation_ratio=1.0)
+ instance = self._live_migration_instance()
+ dest = 'fake_host2'
+ self.assertRaises(exception.MigrationError,
+ self.driver._assert_compute_node_has_enough_memory,
+ self.context, instance, dest)
+
+ def test_live_migration_assert_memory_overcommit(self):
+ # Test that memory check passes with memory overcommit.
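+ # With a 2.0 ratio the usable memory is -1024 + 2048 = 1024 MB,
+ # just enough for the instance.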
+ def fake_get(context, host):
+ return {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': -1024,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None}
+
+ self.stubs.Set(self.driver, '_get_compute_info', fake_get)
+
+ self.flags(ram_allocation_ratio=2.0)
+ instance = self._live_migration_instance()
+ dest = 'fake_host2'
+ result = self.driver._assert_compute_node_has_enough_memory(
+ self.context, instance, dest)
+ self.assertEqual(result, None)
+
+ def test_live_migration_assert_memory_overcommit_lack_memory(self):
+ # Test that memory check fails with memory overcommit.
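+ # -1025 + 2048 = 1023 MB is one MB short of the instance's 1024 MB,
+ # so MigrationError is expected.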
+ def fake_get(context, host):
+ return {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': -1025,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None}
+
+ self.stubs.Set(self.driver, '_get_compute_info', fake_get)
+
+ self.flags(ram_allocation_ratio=2.0)
+ instance = self._live_migration_instance()
+ dest = 'fake_host2'
+ self.assertRaises(exception.MigrationError,
+ self.driver._assert_compute_node_has_enough_memory,
+ self.context, instance, dest)
+
+ def test_basic_schedule_run_instances_anti_affinity(self):
+ filter_properties = {'scheduler_hints':
+ {'group': 'cats'}}
+ # Request spec 1
+ instance_opts1 = {'project_id': 1, 'os_type': 'Linux',
+ 'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0, 'vcpus': 1,
+ 'system_metadata': {'system': 'metadata'}}
+ request_spec1 = {'instance_uuids': ['fake-uuid1-1', 'fake-uuid1-2'],
+ 'instance_properties': instance_opts1,
+ 'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0, 'vcpus': 1}}
+ self.next_weight = 1.0
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_group_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ self.mox.StubOutWithMock(driver, 'instance_update_db')
+ self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
+ self.mox.StubOutWithMock(sched, 'group_hosts')
+
+ instance1_1 = {'uuid': 'fake-uuid1-1'}
+ instance1_2 = {'uuid': 'fake-uuid1-2'}
+
+ sched.group_hosts(mox.IgnoreArg(), 'cats').AndReturn([])
+
+ def inc_launch_index1(*args, **kwargs):
+ request_spec1['instance_properties']['launch_index'] = (
+ request_spec1['instance_properties']['launch_index'] + 1)
+
+ expected_metadata = {'system_metadata':
+ {'system': 'metadata', 'group': 'cats'}}
+ driver.instance_update_db(fake_context, instance1_1['uuid'],
+ extra_values=expected_metadata).WithSideEffects(
+ inc_launch_index1).AndReturn(instance1_1)
+ compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host3',
+ instance=instance1_1, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
+ node='node3')
+
+ driver.instance_update_db(fake_context, instance1_2['uuid'],
+ extra_values=expected_metadata).WithSideEffects(
+ inc_launch_index1).AndReturn(instance1_2)
+ compute_rpcapi.ComputeAPI.run_instance(fake_context, host='host4',
+ instance=instance1_2, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec1, filter_properties=mox.IgnoreArg(),
+ node='node4')
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(fake_context, request_spec1,
+ None, None, None, None, filter_properties)
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 6fcd19d92..edd2e0d61 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -337,6 +337,20 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ def test_affinity_different_filter_handles_deleted_instance(self):
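+ # A deleted instance referenced in the different_host hint should be
+ # skipped instead of breaking the filter, so the host still passes.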
+ filt_cls = self.class_map['DifferentHostFilter']()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ instance = fakes.FakeInstance(context=self.context,
+ params={'host': 'host1'})
+ instance_uuid = instance.uuid
+ db.instance_destroy(self.context, instance_uuid)
+
+ filter_properties = {'context': self.context.elevated(),
+ 'scheduler_hints': {
+ 'different_host': [instance_uuid], }}
+
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
def test_affinity_same_filter_no_list_passes(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
@@ -388,6 +402,20 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ def test_affinity_same_filter_handles_deleted_instance(self):
+ filt_cls = self.class_map['SameHostFilter']()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ instance = fakes.FakeInstance(context=self.context,
+ params={'host': 'host1'})
+ instance_uuid = instance.uuid
+ db.instance_destroy(self.context, instance_uuid)
+
+ filter_properties = {'context': self.context.elevated(),
+ 'scheduler_hints': {
+ 'same_host': [instance_uuid], }}
+
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
@@ -1372,3 +1400,17 @@ class HostFiltersTestCase(test.TestCase):
{'num_instances': 5})
filter_properties = {}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_passes(self):
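+ # The filter should accept any host that does not already appear in
+ # the group_hosts list.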
+ filt_cls = self.class_map['GroupAntiAffinityFilter']()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_hosts': []}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_hosts': ['host2']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_fails(self):
+ filt_cls = self.class_map['GroupAntiAffinityFilter']()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_hosts': ['host1']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index ae7774bac..91e719ab6 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -82,11 +82,12 @@ class HostManagerTestCase(test.TestCase):
self.host_manager._choose_host_filters(specified_filters).AndReturn(
[FakeFilterClass1])
- def _verify_result(self, info, result):
+ def _verify_result(self, info, result, filters=True):
for x in info['got_fprops']:
self.assertEqual(x, info['expected_fprops'])
- self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
- self.assertEqual(set(result), set(info['got_objs']))
+ if filters:
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(info['expected_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
@@ -143,7 +144,7 @@ class HostManagerTestCase(test.TestCase):
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
- self._verify_result(info, result)
+ self._verify_result(info, result, False)
def test_get_filtered_hosts_with_no_matching_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
@@ -156,7 +157,7 @@ class HostManagerTestCase(test.TestCase):
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
- self._verify_result(info, result)
+ self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_and_force(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
@@ -172,7 +173,7 @@ class HostManagerTestCase(test.TestCase):
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
- self._verify_result(info, result)
+ self._verify_result(info, result, False)
def test_update_service_capabilities(self):
service_states = self.host_manager.service_states
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index eb4c3864f..44e1f3537 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -26,15 +26,18 @@ from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
+from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import rpc
from nova.scheduler import driver
from nova.scheduler import manager
from nova import servicegroup
from nova import test
+from nova.tests import fake_instance_actions
from nova.tests import matchers
from nova.tests.scheduler import fakes
@@ -55,6 +58,7 @@ class SchedulerManagerTestCase(test.TestCase):
self.topic = 'fake_topic'
self.fake_args = (1, 2, 3)
self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
+ fake_instance_actions.stub_out_action_events(self.stubs)
def test_1_correct_init(self):
# Correct scheduler driver
@@ -177,8 +181,8 @@ class SchedulerManagerTestCase(test.TestCase):
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- request_spec = {'instance_properties':
- {'uuid': fake_instance_uuid}}
+ request_spec = {'instance_properties': inst,
+ 'instance_uuids': [fake_instance_uuid]}
self.manager.driver.schedule_run_instance(self.context,
request_spec, None, None, None, None, {}).AndRaise(
@@ -187,7 +191,8 @@ class SchedulerManagerTestCase(test.TestCase):
fake_instance_uuid,
{"vm_state": vm_states.ERROR,
"task_state": None}).AndReturn((inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ compute_utils.add_instance_fault_from_exc(self.context,
+ mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
@@ -196,6 +201,7 @@ class SchedulerManagerTestCase(test.TestCase):
def test_prep_resize_no_valid_host_back_in_active_state(self):
fake_instance_uuid = 'fake-instance-id'
+ fake_instance = {'uuid': fake_instance_uuid}
inst = {"vm_state": "", "task_state": ""}
self._mox_schedule_method_helper('schedule_prep_resize')
@@ -211,7 +217,7 @@ class SchedulerManagerTestCase(test.TestCase):
'image': 'fake_image',
'request_spec': request_spec,
'filter_properties': 'fake_props',
- 'instance': 'fake_instance',
+ 'instance': fake_instance,
'instance_type': 'fake_type',
'reservations': list('fake_res'),
}
@@ -221,7 +227,8 @@ class SchedulerManagerTestCase(test.TestCase):
fake_instance_uuid,
{"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn(
(inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ compute_utils.add_instance_fault_from_exc(self.context,
+ mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(exception.NoValidHost), mox.IgnoreArg())
self.mox.ReplayAll()
@@ -229,6 +236,7 @@ class SchedulerManagerTestCase(test.TestCase):
def test_prep_resize_exception_host_in_error_state_and_raise(self):
fake_instance_uuid = 'fake-instance-id'
+ fake_instance = {'uuid': fake_instance_uuid}
self._mox_schedule_method_helper('schedule_prep_resize')
@@ -242,7 +250,7 @@ class SchedulerManagerTestCase(test.TestCase):
'image': 'fake_image',
'request_spec': request_spec,
'filter_properties': 'fake_props',
- 'instance': 'fake_instance',
+ 'instance': fake_instance,
'instance_type': 'fake_type',
'reservations': list('fake_res'),
}
@@ -258,7 +266,8 @@ class SchedulerManagerTestCase(test.TestCase):
fake_instance_uuid,
{"vm_state": vm_states.ERROR,
"task_state": None}).AndReturn((inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+ compute_utils.add_instance_fault_from_exc(self.context,
+ mox.IsA(conductor_api.LocalAPI), new_ref,
mox.IsA(test.TestingException), mox.IgnoreArg())
self.mox.ReplayAll()
@@ -266,6 +275,25 @@ class SchedulerManagerTestCase(test.TestCase):
self.assertRaises(test.TestingException, self.manager.prep_resize,
**kwargs)
+ def test_set_vm_state_and_notify_adds_instance_fault(self):
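+ # Besides updating the instance, _set_vm_state_and_notify is expected
+ # to record an instance fault and emit an ERROR notification.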
+ request = {'instance_properties': {'uuid': 'fake-uuid'}}
+ updates = {'vm_state': 'foo'}
+ fake_inst = {'uuid': 'fake-uuid'}
+
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ self.mox.StubOutWithMock(notifier, 'notify')
+ db.instance_update_and_get_original(self.context, 'fake-uuid',
+ updates).AndReturn((None,
+ fake_inst))
+ db.instance_fault_create(self.context, mox.IgnoreArg())
+ notifier.notify(self.context, mox.IgnoreArg(), 'scheduler.foo',
+ notifier.ERROR, mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.manager._set_vm_state_and_notify('foo', {'vm_state': 'foo'},
+ self.context, None, request)
+
class SchedulerTestCase(test.TestCase):
"""Test case for base scheduler driver class."""
@@ -322,7 +350,8 @@ class SchedulerTestCase(test.TestCase):
'root_gb': 1024,
'ephemeral_gb': 0,
'vm_state': '',
- 'task_state': ''}
+ 'task_state': '',
+ 'instance_type': {'memory_mb': 1024}}
def test_live_migration_basic(self):
# Test basic schedule_live_migration functionality.
@@ -361,9 +390,7 @@ class SchedulerTestCase(test.TestCase):
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(rpc, 'call')
- self.mox.StubOutWithMock(rpc, 'cast')
self.mox.StubOutWithMock(self.driver.compute_rpcapi,
'live_migration')
@@ -384,9 +411,14 @@ class SchedulerTestCase(test.TestCase):
# assert_compute_node_has_enough_memory()
db.service_get_by_compute_host(self.context, dest).AndReturn(
{'compute_node': [{'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 1280,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None,
'hypervisor_version': 1}]})
- db.instance_get_all_by_host(self.context, dest).AndReturn(
- [dict(memory_mb=256), dict(memory_mb=512)])
# Common checks (same hypervisor, etc)
db.service_get_by_compute_host(self.context, dest).AndReturn(
@@ -529,11 +561,14 @@ class SchedulerTestCase(test.TestCase):
def test_live_migration_dest_check_service_lack_memory(self):
# Confirms exception raises when dest doesn't have enough memory.
+ # Flag needed to make this test hit the memory limit, since the
+ # default ram_allocation_ratio of 1.5 allows memory overcommit.
+ self.flags(ram_allocation_ratio=1.0)
+
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(self.driver, '_get_compute_info')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
dest = 'fake_host2'
block_migration = False
@@ -546,9 +581,14 @@ class SchedulerTestCase(test.TestCase):
self.servicegroup_api.service_is_up('fake_service3').AndReturn(True)
self.driver._get_compute_info(self.context, dest).AndReturn(
- {'memory_mb': 2048})
- db.instance_get_all_by_host(self.context, dest).AndReturn(
- [dict(memory_mb=1024), dict(memory_mb=512)])
+ {'memory_mb': 2048,
+ 'free_disk_gb': 512,
+ 'local_gb_used': 512,
+ 'free_ram_mb': 512,
+ 'local_gb': 1024,
+ 'vcpus': 4,
+ 'vcpus_used': 2,
+ 'updated_at': None})
self.mox.ReplayAll()
self.assertRaises(exception.MigrationError,
@@ -620,6 +660,24 @@ class SchedulerTestCase(test.TestCase):
block_migration=block_migration,
disk_over_commit=disk_over_commit)
+ def test_handle_schedule_error_adds_instance_fault(self):
+ instance = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_fault_create')
+ self.mox.StubOutWithMock(notifier, 'notify')
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ mox.IgnoreArg()).AndReturn(
+ (None, instance))
+ db.instance_fault_create(self.context, mox.IgnoreArg())
+ notifier.notify(self.context, mox.IgnoreArg(),
+ 'scheduler.run_instance',
+ notifier.ERROR, mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ driver.handle_schedule_error(self.context,
+ exception.NoValidHost('test'),
+ instance['uuid'], {})
+
class SchedulerDriverBaseTestCase(SchedulerTestCase):
"""Test cases for base scheduler driver class methods
diff --git a/nova/tests/servicegroup/test_zk_driver.py b/nova/tests/servicegroup/test_zk_driver.py
new file mode 100644
index 000000000..753153bb5
--- /dev/null
+++ b/nova/tests/servicegroup/test_zk_driver.py
@@ -0,0 +1,65 @@
+# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
+# Copyright (c) IBM 2012 Alexey Roytman <roytman at il dot ibm dot com>.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test the ZooKeeper driver for servicegroup.
+
+You need to install ZooKeeper and its related dependencies locally
+to run this test. Since it is unclear how to install the python-zookeeper
+library in a venv, you may have to run the test outside of one.
+
+To set up in Ubuntu 12.04:
+$ sudo apt-get install zookeeper zookeeperd python-zookeeper
+$ sudo pip install evzookeeper
+$ nosetests nova.tests.servicegroup.test_zk_driver
+"""
+
+import eventlet
+
+from nova import servicegroup
+from nova import test
+
+
+class ZKServiceGroupTestCase(test.TestCase):
+
+ def setUp(self):
+ super(ZKServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ try:
+ from nova.servicegroup.drivers import zk
+ _unused = zk
+ except ImportError:
+ self.skipTest("Unable to test due to lack of ZooKeeper")
+ self.flags(servicegroup_driver='zk')
+ self.flags(address='localhost:2181', group="zk")
+
+ def test_join_leave(self):
+ self.servicegroup_api = servicegroup.API()
+ service_id = {'topic': 'unittest', 'host': 'serviceA'}
+ self.servicegroup_api.join(service_id['host'], service_id['topic'])
+ self.assertTrue(self.servicegroup_api.service_is_up(service_id))
+ self.servicegroup_api.leave(service_id['host'], service_id['topic'])
+ # Make sure ZooKeeper state is updated and the watcher is triggered.
+ eventlet.sleep(1)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_id))
+
+ def test_stop(self):
+ self.servicegroup_api = servicegroup.API()
+ service_id = {'topic': 'unittest', 'host': 'serviceA'}
+ pulse = self.servicegroup_api.join(service_id['host'],
+ service_id['topic'], None)
+ self.assertTrue(self.servicegroup_api.service_is_up(service_id))
+ pulse.stop()
+ eventlet.sleep(1)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_id))
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 11c16d6dd..949f54512 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -22,12 +22,13 @@ import random
import StringIO
import boto
+import boto.connection
from boto.ec2 import regioninfo
from boto import exception as boto_exc
# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
-try:
- import boto.connection as httplib
-except ImportError:
+if hasattr(boto.connection, 'HTTPResponse'):
+ httplib = boto.connection
+else:
import httplib
import fixtures
import webob
diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py
index 2c5c06921..4192fa08f 100644
--- a/nova/tests/test_availability_zones.py
+++ b/nova/tests/test_availability_zones.py
@@ -23,7 +23,6 @@ from nova import availability_zones as az
from nova import context
from nova import db
from nova.openstack.common import cfg
-from nova import service
from nova import test
CONF = cfg.CONF
diff --git a/nova/tests/test_bdm.py b/nova/tests/test_bdm.py
index 4d62d6bbf..43ca4d7b0 100644
--- a/nova/tests/test_bdm.py
+++ b/nova/tests/test_bdm.py
@@ -246,6 +246,5 @@ class BlockDeviceMappingEc2CloudTestCase(test.TestCase):
result = {}
cloud._format_mappings(properties, result)
- print result
self.assertEqual(result['blockDeviceMapping'].sort(),
expected_result['blockDeviceMapping'].sort())
diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py
index 6a77d98ae..8189057cb 100644
--- a/nova/tests/test_block_device.py
+++ b/nova/tests/test_block_device.py
@@ -91,3 +91,38 @@ class BlockDeviceTestCase(test.TestCase):
self.assertEqual(block_device.strip_prefix('a'), 'a')
self.assertEqual(block_device.strip_prefix('xvda'), 'a')
self.assertEqual(block_device.strip_prefix('vda'), 'a')
+
+ def test_volume_in_mapping(self):
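+ # Build a block_device_info with root disk, swap, two ephemerals and
+ # two mapped volumes, then verify which device names are reported as
+ # part of the mapping.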
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'num': 0,
+ 'virtual_name': 'ephemeral0',
+ 'device_name': '/dev/sdc1',
+ 'size': 1},
+ {'num': 2,
+ 'virtual_name': 'ephemeral2',
+ 'device_name': '/dev/sdd',
+ 'size': 1}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'},
+ {'mount_device': '/dev/sdf',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ def _assert_volume_in_mapping(device_name, true_or_false):
+ in_mapping = block_device.volume_in_mapping(
+ device_name, block_device_info)
+ self.assertEquals(in_mapping, true_or_false)
+
+ _assert_volume_in_mapping('sda', False)
+ _assert_volume_in_mapping('sdb', True)
+ _assert_volume_in_mapping('sdc1', True)
+ _assert_volume_in_mapping('sdd', True)
+ _assert_volume_in_mapping('sde', True)
+ _assert_volume_in_mapping('sdf', True)
+ _assert_volume_in_mapping('sdg', False)
+ _assert_volume_in_mapping('sdh1', False)
diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py
index 0915bf157..527534fd5 100644
--- a/nova/tests/test_context.py
+++ b/nova/tests/test_context.py
@@ -74,3 +74,22 @@ class ContextTestCase(test.TestCase):
self.assertTrue(c)
self.assertIn("'extra_arg1': 'meow'", info['log_msg'])
self.assertIn("'extra_arg2': 'wuff'", info['log_msg'])
+
+ def test_service_catalog_default(self):
+ ctxt = context.RequestContext('111', '222')
+ self.assertEquals(ctxt.service_catalog, [])
+
+ def test_service_catalog_cinder_only(self):
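+ # The context should keep only the volume-type (cinder) entries from
+ # the supplied service catalog.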
+ service_catalog = [
+ {u'type': u'compute', u'name': u'nova'},
+ {u'type': u's3', u'name': u's3'},
+ {u'type': u'image', u'name': u'glance'},
+ {u'type': u'volume', u'name': u'cinder'},
+ {u'type': u'ec2', u'name': u'ec2'},
+ {u'type': u'object-store', u'name': u'swift'},
+ {u'type': u'identity', u'name': u'keystone'}]
+
+ volume_catalog = [{u'type': u'volume', u'name': u'cinder'}]
+ ctxt = context.RequestContext('111', '222',
+ service_catalog=service_catalog)
+ self.assertEquals(ctxt.service_catalog, volume_catalog)
diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py
index 25df336fb..cec3ca9c1 100644
--- a/nova/tests/test_crypto.py
+++ b/nova/tests/test_crypto.py
@@ -212,3 +212,31 @@ e6fCXWECgYEAqgpGvva5kJ1ISgNwnJbwiNw0sOT9BMOsdNZBElf0kJIIy6FMPvap
def test_ssh_encrypt_failure(self):
self.assertRaises(exception.EncryptionFailure,
crypto.ssh_encrypt_text, '', self.text)
+
+
+class ConversionTests(test.TestCase):
+ k1 = ("ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4CqmrxfU7x4sJrubpMNxeglul+d"
+ "ByrsicnvQcHDEjPzdvoz+BaoAG9bjCA5mCeTBIISsVTVXz/hxNeiuBV6LH/UR/c"
+ "27yl53ypN+821ImoexQZcKItdnjJ3gVZlDob1f9+1qDVy63NJ1c+TstkrCTRVeo"
+ "9VyE7RpdSS4UCiBe8Xwk3RkedioFxePrI0Ktc2uASw2G0G2Rl7RN7KZOJbCivfF"
+ "LQMAOu6e+7fYvuE1gxGHHj7dxaBY/ioGOm1W4JmQ1V7AKt19zTBlZKduN8FQMSF"
+ "r35CDlvoWs0+OP8nwlebKNCi/5sdL8qiSLrAcPB4LqdkAf/blNSVA2Yl83/c4lQ"
+ "== test@test")
+
+ k2 = ("-----BEGIN PUBLIC KEY-----\n"
+ "MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEA4CqmrxfU7x4sJrubpMNx\n"
+ "eglul+dByrsicnvQcHDEjPzdvoz+BaoAG9bjCA5mCeTBIISsVTVXz/hxNeiuBV6L\n"
+ "H/UR/c27yl53ypN+821ImoexQZcKItdnjJ3gVZlDob1f9+1qDVy63NJ1c+TstkrC\n"
+ "TRVeo9VyE7RpdSS4UCiBe8Xwk3RkedioFxePrI0Ktc2uASw2G0G2Rl7RN7KZOJbC\n"
+ "ivfFLQMAOu6e+7fYvuE1gxGHHj7dxaBY/ioGOm1W4JmQ1V7AKt19zTBlZKduN8FQ\n"
+ "MSFr35CDlvoWs0+OP8nwlebKNCi/5sdL8qiSLrAcPB4LqdkAf/blNSVA2Yl83/c4\n"
+ "lQIBIw==\n"
+ "-----END PUBLIC KEY-----\n")
+
+ def test_convert_keys(self):
+ result = crypto.convert_from_sshrsa_to_pkcs8(self.k1)
+ self.assertEqual(result, self.k2)
+
+ def test_convert_failure(self):
+ self.assertRaises(exception.EncryptionFailure,
+ crypto.convert_from_sshrsa_to_pkcs8, '')
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 40552d1c0..c6bf2941e 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -29,6 +29,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
from nova.tests import matchers
+from nova import utils
CONF = cfg.CONF
@@ -253,11 +254,11 @@ class DbApiTestCase(test.TestCase):
values = {'address': 'fixed'}
fixed = db.fixed_ip_create(ctxt, values)
res = db.floating_ip_fixed_ip_associate(ctxt, floating, fixed, 'foo')
- self.assertEqual(res, fixed)
+ self.assertEqual(res['address'], fixed)
res = db.floating_ip_fixed_ip_associate(ctxt, floating, fixed, 'foo')
self.assertEqual(res, None)
res = db.floating_ip_disassociate(ctxt, floating)
- self.assertEqual(res, fixed)
+ self.assertEqual(res['address'], fixed)
res = db.floating_ip_disassociate(ctxt, floating)
self.assertEqual(res, None)
@@ -467,6 +468,34 @@ class DbApiTestCase(test.TestCase):
self.assertEquals(spec, old_ref['extra_specs'])
self.assertEquals(spec, new_ref['extra_specs'])
+ def _test_instance_update_updates_metadata(self, metadata_type):
+ ctxt = context.get_admin_context()
+
+ instance = db.instance_create(ctxt, {})
+
+ def set_and_check(meta):
+ inst = db.instance_update(ctxt, instance['uuid'],
+ {metadata_type: dict(meta)})
+ _meta = utils.metadata_to_dict(inst[metadata_type])
+ self.assertEqual(meta, _meta)
+
+ meta = {'speed': '88', 'units': 'MPH'}
+ set_and_check(meta)
+
+ meta['gigawatts'] = '1.21'
+ set_and_check(meta)
+
+ del meta['gigawatts']
+ set_and_check(meta)
+
+ def test_instance_update_updates_system_metadata(self):
+ # Ensure that system_metadata is updated during instance_update
+ self._test_instance_update_updates_metadata('system_metadata')
+
+ def test_instance_update_updates_metadata(self):
+ # Ensure that metadata is updated during instance_update
+ self._test_instance_update_updates_metadata('metadata')
+
def test_instance_fault_create(self):
# Ensure we can create an instance fault.
ctxt = context.get_admin_context()
@@ -721,6 +750,34 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(start_time, events[0]['start_time'])
self.assertEqual(finish_time, events[0]['finish_time'])
+ def test_instance_action_and_event_start_string_time(self):
+ """Create an instance action and event with a string start_time."""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ start_time_str = timeutils.strtime(start_time)
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time_str}
+ action = db.action_start(ctxt, action_values)
+
+ event_values = {'event': 'schedule',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'start_time': start_time_str}
+ db.action_event_start(ctxt, event_values)
+
+ # Retrieve the event to ensure it was successfully added
+ events = db.action_events_get(ctxt, action['id'])
+ self.assertEqual(1, len(events))
+ self.assertEqual('schedule', events[0]['event'])
+ # db api still returns models with datetime, not str, values
+ self.assertEqual(start_time, events[0]['start_time'])
+
def test_instance_action_event_get_by_id(self):
"""Get a specific instance action event."""
ctxt1 = context.get_admin_context()
@@ -1181,7 +1238,7 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
- self.assertEqual(result.availability_zone, 'fake_avail_zone')
+ self.assertEqual(result['availability_zone'], 'fake_avail_zone')
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, 1, new_values)
@@ -1199,8 +1256,8 @@ class AggregateDBApiTestCase(test.TestCase):
updated = db.aggregate_get(ctxt, result['id'])
self.assertThat(values['metadata'],
matchers.DictMatches(expected))
- self.assertNotEqual(result.availability_zone,
- updated.availability_zone)
+ self.assertNotEqual(result['availability_zone'],
+ updated['availability_zone'])
def test_aggregate_update_with_existing_metadata(self):
ctxt = context.get_admin_context()
@@ -1278,10 +1335,10 @@ class AggregateDBApiTestCase(test.TestCase):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
- db.aggregate_metadata_delete(ctxt, result.id, 'availability_zone')
- expected = db.aggregate_metadata_get(ctxt, result.id)
- aggregate = db.aggregate_get(ctxt, result.id)
- self.assertEquals(aggregate.availability_zone, None)
+ db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ aggregate = db.aggregate_get(ctxt, result['id'])
+ self.assertEquals(aggregate['availability_zone'], None)
self.assertThat({}, matchers.DictMatches(expected))
def test_aggregate_metadata_delete_raise_not_found(self):
@@ -1510,6 +1567,25 @@ class MigrationTestCase(test.TestCase):
self.assertEqual(migration['instance_uuid'], instance['uuid'])
+class TestFixedIPGetByNetworkHost(test.TestCase):
+ def test_not_found_exception(self):
+ ctxt = context.get_admin_context()
+
+ self.assertRaises(
+ exception.FixedIpNotFoundForNetworkHost,
+ db.fixed_ip_get_by_network_host,
+ ctxt, 1, 'ignore')
+
+ def test_fixed_ip_found(self):
+ ctxt = context.get_admin_context()
+ db.fixed_ip_create(ctxt, dict(network_id=1, host='host'))
+
+ fip = db.fixed_ip_get_by_network_host(ctxt, 1, 'host')
+
+ self.assertEquals(1, fip['network_id'])
+ self.assertEquals('host', fip['host'])
+
+
class TestIpAllocation(test.TestCase):
def setUp(self):
diff --git a/nova/tests/test_hypervapi.py b/nova/tests/test_hypervapi.py
index 9fec9d151..0c2f90a4d 100644
--- a/nova/tests/test_hypervapi.py
+++ b/nova/tests/test_hypervapi.py
@@ -18,37 +18,53 @@
Test suite for the Hyper-V driver and related APIs.
"""
-import json
+import io
+import mox
import os
import platform
import shutil
-import sys
+import time
import uuid
+from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import db
from nova.image import glance
from nova.openstack.common import cfg
+from nova import test
from nova.tests import fake_network
-from nova.tests.hyperv import basetestcase
from nova.tests.hyperv import db_fakes
-from nova.tests.hyperv import hypervutils
-from nova.tests.hyperv import mockproxy
-import nova.tests.image.fake as fake_image
+from nova.tests.hyperv import fake
+from nova.tests.image import fake as fake_image
from nova.tests import matchers
+from nova import utils
+from nova.virt import configdrive
+from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import livemigrationutils
+from nova.virt.hyperv import networkutils
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutils
+from nova.virt.hyperv import volumeutilsv2
from nova.virt import images
CONF = cfg.CONF
+CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif')
-class HyperVAPITestCase(basetestcase.BaseTestCase):
+class HyperVAPITestCase(test.TestCase):
"""Unit tests for Hyper-V driver calls."""
+ def __init__(self, test_case_name):
+ self._mox = mox.Mox()
+ super(HyperVAPITestCase, self).__init__(test_case_name)
+
def setUp(self):
super(HyperVAPITestCase, self).setUp()
@@ -56,22 +72,22 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._project_id = 'fake'
self._instance_data = None
self._image_metadata = None
- self._dest_server = None
self._fetched_image = None
self._update_image_raise_exception = False
- self._post_method_called = False
- self._recover_method_called = False
self._volume_target_portal = 'testtargetportal:3260'
- self._volume_id = '8957e088-dbee-4216-8056-978353a3e737'
+ self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
self._context = context.RequestContext(self._user_id, self._project_id)
+ self._instance_ide_disks = []
+ self._instance_ide_dvds = []
+ self._instance_volume_disks = []
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
vswitch_name='external',
- network_api_class='nova.network.quantumv2.api.API')
+ network_api_class='nova.network.quantumv2.api.API',
+ force_volumeutils_v1=True)
- self._hypervutils = hypervutils.HyperVUtils()
self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
@@ -79,14 +95,8 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
fake_image.stub_out_image_service(self.stubs)
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
- def fake_dumps(msg, default=None, **kwargs):
- return '""'
- self.stubs.Set(json, 'dumps', fake_dumps)
-
def fake_fetch(context, image_id, target, user, project):
self._fetched_image = target
- if not os.path.exists(target):
- self._hypervutils.create_vhd(target)
self.stubs.Set(images, 'fetch', fake_fetch)
def fake_get_remote_image_service(context, name):
@@ -98,104 +108,198 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
self._image_metadata = image_metadata
return (FakeGlanceImageService(), 1)
self.stubs.Set(glance, 'get_remote_image_service',
- fake_get_remote_image_service)
-
- # Modules to mock
- modules_to_mock = [
- 'wmi',
- 'os',
- 'shutil',
- 'uuid',
- 'time',
- 'multiprocessing',
- '_winreg',
- 'nova.virt.configdrive',
- 'nova.utils',
- 'ctypes'
- ]
+ fake_get_remote_image_service)
+
+ def fake_sleep(ms):
+ pass
+ self.stubs.Set(time, 'sleep', fake_sleep)
+
+ self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
+ self._mox.StubOutWithMock(fake.PathUtils, 'open')
+
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_iscsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks_count')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'attach_volume_to_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_mounted_disk_by_drive_number')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
+
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
+
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils,
+ 'is_cpu_feature_present')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
+
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'get_external_vswitch')
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'create_vswitch_port')
+
+ self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
+ 'live_migrate_vm')
+ self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
+ 'check_live_migration_config')
+
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'volume_in_mapping')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_session_id_from_mounted_disk')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_device_number_for_target')
+
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'login_storage_target')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'logout_storage_target')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'execute_log_out')
+
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'login_storage_target')
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'logout_storage_target')
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'execute_log_out')
+
+ self._mox.StubOutWithMock(shutil, 'copyfile')
+ self._mox.StubOutWithMock(shutil, 'rmtree')
+
+ self._mox.StubOutWithMock(os, 'remove')
+
+ self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
+ self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
+ 'metadata_for_config_drive')
+
+ # Can't use StubOutClassWithMocks due to __exit__ and __enter__
+ self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
+ self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
+
+ self._mox.StubOutWithMock(utils, 'execute')
- # Modules in which the mocks are going to be injected
- from nova.virt.hyperv import baseops
- from nova.virt.hyperv import basevolumeutils
- from nova.virt.hyperv import hostops
- from nova.virt.hyperv import livemigrationops
- from nova.virt.hyperv import snapshotops
- from nova.virt.hyperv import vif
- from nova.virt.hyperv import vmops
- from nova.virt.hyperv import volumeops
- from nova.virt.hyperv import volumeutils
- from nova.virt.hyperv import volumeutilsV2
-
- modules_to_test = [
- driver_hyperv,
- basevolumeutils,
- baseops,
- hostops,
- vif,
- vmops,
- vmutils,
- volumeops,
- volumeutils,
- volumeutilsV2,
- snapshotops,
- livemigrationops,
- hypervutils,
- db_fakes,
- sys.modules[__name__]
- ]
+ def tearDown(self):
+ self._mox.UnsetStubs()
+ super(HyperVAPITestCase, self).tearDown()
- self._inject_mocks_in_modules(modules_to_mock, modules_to_test)
+ def test_get_available_resource(self):
+ cpu_info = {'Architecture': 'fake',
+ 'Name': 'fake',
+ 'Manufacturer': 'ACME, Inc.',
+ 'NumberOfCores': 2,
+ 'NumberOfLogicalProcessors': 4}
- if isinstance(snapshotops.wmi, mockproxy.Mock):
- from nova.virt.hyperv import ioutils
- import StringIO
+ tot_mem_kb = 2000000L
+ free_mem_kb = 1000000L
- def fake_open(name, mode):
- return StringIO.StringIO("fake file content")
- self.stubs.Set(ioutils, 'open', fake_open)
+ tot_hdd_b = 4L * 1024 ** 3
+ free_hdd_b = 3L * 1024 ** 3
- def tearDown(self):
- try:
- if self._instance_data and self._hypervutils.vm_exists(
- self._instance_data["name"]):
- self._hypervutils.remove_vm(self._instance_data["name"])
+ windows_version = '6.2.9200'
- if self._dest_server and \
- self._hypervutils.remote_vm_exists(self._dest_server,
- self._instance_data["name"]):
- self._hypervutils.remove_remote_vm(self._dest_server,
- self._instance_data["name"])
+ hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
+ free_mem_kb))
- self._hypervutils.logout_iscsi_volume_sessions(self._volume_id)
+ m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
+ m.AndReturn((tot_hdd_b, free_hdd_b))
- shutil.rmtree(CONF.instances_path, True)
+ hostutils.HostUtils.get_cpus_info().AndReturn([cpu_info])
+ m = hostutils.HostUtils.is_cpu_feature_present(mox.IsA(int))
+ m.MultipleTimes()
- fake_image.FakeImageService_reset()
- finally:
- super(HyperVAPITestCase, self).tearDown()
+ m = hostutils.HostUtils.get_windows_version()
+ m.AndReturn(windows_version)
- def test_get_available_resource(self):
+ self._mox.ReplayAll()
dic = self._conn.get_available_resource(None)
+ self._mox.VerifyAll()
+ self.assertEquals(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
self.assertEquals(dic['hypervisor_hostname'], platform.node())
+ self.assertEquals(dic['memory_mb'], tot_mem_kb / 1024)
+ self.assertEquals(dic['memory_mb_used'],
+ tot_mem_kb / 1024 - free_mem_kb / 1024)
+ self.assertEquals(dic['local_gb'], tot_hdd_b / 1024 ** 3)
+ self.assertEquals(dic['local_gb_used'],
+ tot_hdd_b / 1024 ** 3 - free_hdd_b / 1024 ** 3)
+ self.assertEquals(dic['hypervisor_version'],
+ windows_version.replace('.', ''))
def test_get_host_stats(self):
+ tot_mem_kb = 2000000L
+ free_mem_kb = 1000000L
+
+ tot_hdd_b = 4L * 1024 ** 3
+ free_hdd_b = 3L * 1024 ** 3
+
+ hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
+ free_mem_kb))
+
+ m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
+ m.AndReturn((tot_hdd_b, free_hdd_b))
+
+ self._mox.ReplayAll()
dic = self._conn.get_host_stats(True)
+ self._mox.VerifyAll()
+
+ self.assertEquals(dic['disk_total'], tot_hdd_b / 1024 ** 3)
+ self.assertEquals(dic['disk_available'], free_hdd_b / 1024 ** 3)
+
+ self.assertEquals(dic['host_memory_total'], tot_mem_kb / 1024)
+ self.assertEquals(dic['host_memory_free'], free_mem_kb / 1024)
self.assertEquals(dic['disk_total'],
- dic['disk_used'] + dic['disk_available'])
+ dic['disk_used'] + dic['disk_available'])
self.assertEquals(dic['host_memory_total'],
- dic['host_memory_overhead'] + dic['host_memory_free'])
+ dic['host_memory_overhead'] +
+ dic['host_memory_free'])
def test_list_instances(self):
- num_vms = self._hypervutils.get_vm_count()
+ fake_instances = ['fake1', 'fake2']
+ vmutils.VMUtils.list_instances().AndReturn(fake_instances)
+
+ self._mox.ReplayAll()
instances = self._conn.list_instances()
+ self._mox.VerifyAll()
- self.assertEquals(len(instances), num_vms)
+ self.assertEquals(instances, fake_instances)
def test_get_info(self):
- self._spawn_instance(True)
+ self._instance_data = self._get_instance_data()
+
+ summary_info = {'NumberOfProcessors': 2,
+ 'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
+ 'MemoryUsage': 1000,
+ 'UpTime': 1}
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(True)
+
+ func = mox.Func(self._check_instance_name)
+ m = vmutils.VMUtils.get_vm_summary_info(func)
+ m.AndReturn(summary_info)
+
+ self._mox.ReplayAll()
info = self._conn.get_info(self._instance_data)
+ self._mox.VerifyAll()
self.assertEquals(info["state"], power_state.RUNNING)
@@ -205,189 +309,238 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def test_spawn_no_cow_image(self):
self._test_spawn_instance(False)
- def test_spawn_config_drive(self):
- self.skip('broken by move to contextlib for configdrive')
-
+ def _setup_spawn_config_drive_mocks(self, use_cdrom):
+ im = instance_metadata.InstanceMetadata(mox.IgnoreArg(),
+ content=mox.IsA(list),
+ extra_md=mox.IsA(dict))
+
+ cdb = self._mox.CreateMockAnything()
+ m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
+ m.AndReturn(cdb)
+ # __enter__ and __exit__ are required by "with"
+ cdb.__enter__().AndReturn(cdb)
+ cdb.make_drive(mox.IsA(str))
+ cdb.__exit__(None, None, None).AndReturn(None)
+
+ if not use_cdrom:
+ utils.execute(CONF.qemu_img_cmd,
+ 'convert',
+ '-f',
+ 'raw',
+ '-O',
+ 'vpc',
+ mox.IsA(str),
+ mox.IsA(str),
+ attempts=1)
+ os.remove(mox.IsA(str))
+
+ m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk)
+
+ def _test_spawn_config_drive(self, use_cdrom):
self.flags(force_config_drive=True)
+ self.flags(config_drive_cdrom=use_cdrom)
self.flags(mkisofs_cmd='mkisofs.exe')
- self._spawn_instance(True)
+ self._setup_spawn_config_drive_mocks(use_cdrom)
- (vhd_paths, _, dvd_paths) = self._hypervutils.get_vm_disks(
- self._instance_data["name"])
- self.assertEquals(len(dvd_paths), 0)
- self.assertEquals(len(vhd_paths), 2)
+ if use_cdrom:
+ expected_ide_disks = 1
+ expected_ide_dvds = 1
+ else:
+ expected_ide_disks = 2
+ expected_ide_dvds = 0
- def test_spawn_config_drive_cdrom(self):
- self.skip('broken by move to contextlib for configdrive')
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds)
- self.flags(force_config_drive=True)
- self.flags(config_drive_cdrom=True)
- self.flags(mkisofs_cmd='mkisofs.exe')
-
- self._spawn_instance(True)
+ def test_spawn_config_drive(self):
+ self._test_spawn_config_drive(False)
- (vhd_paths, _, dvd_paths) = self._hypervutils.get_vm_disks(
- self._instance_data["name"])
- self.assertEquals(len(dvd_paths), 1)
- self.assertEquals(len(vhd_paths), 1)
- self.assertTrue(os.path.exists(dvd_paths[0]))
+ def test_spawn_config_drive_cdrom(self):
+ self._test_spawn_config_drive(True)
def test_spawn_no_config_drive(self):
self.flags(force_config_drive=False)
- self._spawn_instance(True)
+ expected_ide_disks = 1
+ expected_ide_dvds = 0
- (_, _, dvd_paths) = self._hypervutils.get_vm_disks(
- self._instance_data["name"])
- self.assertEquals(len(dvd_paths), 0)
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds)
- def test_spawn_no_vswitch_exception(self):
+ def test_spawn_nova_net_vif(self):
self.flags(network_api_class='nova.network.api.API')
# Reinstantiate driver, as the VIF plugin is loaded during __init__
self._conn = driver_hyperv.HyperVDriver(None)
- # Set flag to a non existing vswitch
- self.flags(vswitch_name=str(uuid.uuid4()))
- self.assertRaises(vmutils.HyperVException, self._spawn_instance, True)
- self.assertFalse(self._hypervutils.vm_exists(
- self._instance_data["name"]))
+ def setup_vif_mocks():
+ fake_vswitch_path = 'fake vswitch path'
+ fake_vswitch_port = 'fake port'
+
+ m = networkutils.NetworkUtils.get_external_vswitch(
+ CONF.vswitch_name)
+ m.AndReturn(fake_vswitch_path)
+
+ m = networkutils.NetworkUtils.create_vswitch_port(
+ fake_vswitch_path, mox.IsA(str))
+ m.AndReturn(fake_vswitch_port)
+
+ vmutils.VMUtils.set_nic_connection(mox.IsA(str), mox.IsA(str),
+ fake_vswitch_port)
+
+ self._test_spawn_instance(setup_vif_mocks_func=setup_vif_mocks)
+
+ def test_spawn_nova_net_vif_no_vswitch_exception(self):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def setup_vif_mocks():
+ m = networkutils.NetworkUtils.get_external_vswitch(
+ CONF.vswitch_name)
+ m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
+
+ self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
+ setup_vif_mocks_func=setup_vif_mocks,
+ with_exception=True)
+
+ def _check_instance_name(self, vm_name):
+ return vm_name == self._instance_data['name']
def _test_vm_state_change(self, action, from_state, to_state):
- self._spawn_instance(True)
- if from_state:
- self._hypervutils.set_vm_state(self._instance_data["name"],
- from_state)
- action(self._instance_data)
+ self._instance_data = self._get_instance_data()
- vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
- self.assertEquals(vmstate, to_state)
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ to_state)
+
+ self._mox.ReplayAll()
+ action(self._instance_data)
+ self._mox.VerifyAll()
def test_pause(self):
self._test_vm_state_change(self._conn.pause, None,
- constants.HYPERV_VM_STATE_PAUSED)
+ constants.HYPERV_VM_STATE_PAUSED)
def test_pause_already_paused(self):
self._test_vm_state_change(self._conn.pause,
- constants.HYPERV_VM_STATE_PAUSED,
- constants.HYPERV_VM_STATE_PAUSED)
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_PAUSED)
def test_unpause(self):
self._test_vm_state_change(self._conn.unpause,
- constants.HYPERV_VM_STATE_PAUSED,
- constants.HYPERV_VM_STATE_ENABLED)
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_ENABLED)
def test_unpause_already_running(self):
self._test_vm_state_change(self._conn.unpause, None,
- constants.HYPERV_VM_STATE_ENABLED)
+ constants.HYPERV_VM_STATE_ENABLED)
def test_suspend(self):
self._test_vm_state_change(self._conn.suspend, None,
- constants.HYPERV_VM_STATE_SUSPENDED)
+ constants.HYPERV_VM_STATE_SUSPENDED)
def test_suspend_already_suspended(self):
self._test_vm_state_change(self._conn.suspend,
- constants.HYPERV_VM_STATE_SUSPENDED,
- constants.HYPERV_VM_STATE_SUSPENDED)
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_SUSPENDED)
def test_resume(self):
self._test_vm_state_change(lambda i: self._conn.resume(i, None),
- constants.HYPERV_VM_STATE_SUSPENDED,
- constants.HYPERV_VM_STATE_ENABLED)
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_ENABLED)
def test_resume_already_running(self):
self._test_vm_state_change(lambda i: self._conn.resume(i, None), None,
- constants.HYPERV_VM_STATE_ENABLED)
+ constants.HYPERV_VM_STATE_ENABLED)
def test_power_off(self):
self._test_vm_state_change(self._conn.power_off, None,
- constants.HYPERV_VM_STATE_DISABLED)
+ constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_already_powered_off(self):
- self._test_vm_state_change(self._conn.suspend,
- constants.HYPERV_VM_STATE_DISABLED,
- constants.HYPERV_VM_STATE_DISABLED)
+ self._test_vm_state_change(self._conn.power_off,
+ constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_DISABLED)
def test_power_on(self):
self._test_vm_state_change(self._conn.power_on,
- constants.HYPERV_VM_STATE_DISABLED,
- constants.HYPERV_VM_STATE_ENABLED)
+ constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_ENABLED)
def test_power_on_already_running(self):
self._test_vm_state_change(self._conn.power_on, None,
- constants.HYPERV_VM_STATE_ENABLED)
+ constants.HYPERV_VM_STATE_ENABLED)
def test_reboot(self):
- self._spawn_instance(True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
- self._conn.reboot(self._instance_data, network_info, None)
+ self._instance_data = self._get_instance_data()
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_REBOOT)
- vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
- self.assertEquals(vmstate, constants.HYPERV_VM_STATE_ENABLED)
+ self._mox.ReplayAll()
+ self._conn.reboot(self._context, self._instance_data, network_info,
+ None)
+ self._mox.VerifyAll()
def test_destroy(self):
- self._spawn_instance(True)
- (vhd_paths, _, _) = self._hypervutils.get_vm_disks(
- self._instance_data["name"])
+ self._instance_data = self._get_instance_data()
- self._conn.destroy(self._instance_data)
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(True)
- self.assertFalse(self._hypervutils.vm_exists(
- self._instance_data["name"]))
- self._instance_data = None
+ m = vmutils.VMUtils.destroy_vm(mox.Func(self._check_instance_name),
+ True)
+ m.AndReturn([])
- for vhd_path in vhd_paths:
- self.assertFalse(os.path.exists(vhd_path))
+ self._mox.ReplayAll()
+ self._conn.destroy(self._instance_data)
+ self._mox.VerifyAll()
def test_live_migration(self):
- self.flags(limit_cpu_features=True)
- self._spawn_instance(False)
+ self._test_live_migration(False)
- # Existing server
- self._dest_server = "HV12OSDEMO2"
+ def test_live_migration_with_target_failure(self):
+ self._test_live_migration(True)
- self._live_migration(self._dest_server)
+ def _test_live_migration(self, test_failure):
+ dest_server = 'fake_server'
- instance_name = self._instance_data["name"]
- self.assertFalse(self._hypervutils.vm_exists(instance_name))
- self.assertTrue(self._hypervutils.remote_vm_exists(self._dest_server,
- instance_name))
+ instance_data = self._get_instance_data()
- self.assertTrue(self._post_method_called)
- self.assertFalse(self._recover_method_called)
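+        # Exactly one of the two callbacks must fire: post_method on
+        # success, recover_method when the migration fails.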
+ fake_post_method = self._mox.CreateMockAnything()
+ if not test_failure:
+ fake_post_method(self._context, instance_data, dest_server,
+ False)
- def test_live_migration_with_target_failure(self):
- self.flags(limit_cpu_features=True)
- self._spawn_instance(False)
+ fake_recover_method = self._mox.CreateMockAnything()
+ if test_failure:
+ fake_recover_method(self._context, instance_data, dest_server,
+ False)
- dest_server = "nonexistingserver"
+ m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
+ instance_data['name'], dest_server)
+ if test_failure:
+ m.AndRaise(Exception('Simulated failure'))
- exception_raised = False
+ self._mox.ReplayAll()
try:
- self._live_migration(dest_server)
+ self._conn.live_migration(self._context, instance_data,
+ dest_server, fake_post_method,
+ fake_recover_method)
+ exception_raised = False
except Exception:
exception_raised = True
- # Cannot use assertRaises with pythoncom.com_error on Linux
- self.assertTrue(exception_raised)
-
- instance_name = self._instance_data["name"]
- self.assertTrue(self._hypervutils.vm_exists(instance_name))
-
- self.assertFalse(self._post_method_called)
- self.assertTrue(self._recover_method_called)
-
- def _live_migration(self, dest_server):
- def fake_post_method(context, instance_ref, dest, block_migration):
- self._post_method_called = True
-
- def fake_recover_method(context, instance_ref, dest, block_migration):
- self._recover_method_called = True
-
- self._conn.live_migration(self._context, self._instance_data,
- dest_server, fake_post_method, fake_recover_method)
+        # The exception must propagate if and only if a failure was simulated
+        self.assertEquals(test_failure, exception_raised)
+ self._mox.VerifyAll()
def test_pre_live_migration_cow_image(self):
self._test_pre_live_migration(True)
@@ -398,83 +551,134 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
def _test_pre_live_migration(self, cow):
self.flags(use_cow_images=cow)
- instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
+ instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
- instance_data = db_fakes.get_fake_instance_data(instance_name,
- self._project_id, self._user_id)
- block_device_info = None
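+        # pre_live_migration first validates the host's live migration
+        # configuration; with CoW images it also checks the volume mapping.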
+ m = livemigrationutils.LiveMigrationUtils.check_live_migration_config()
+ m.AndReturn(True)
+
+ if cow:
+ m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
+ None)
+ m.AndReturn([])
+
+ self._mox.ReplayAll()
self._conn.pre_live_migration(self._context, instance_data,
- block_device_info, network_info)
+ None, network_info)
+ self._mox.VerifyAll()
if cow:
- self.assertTrue(not self._fetched_image is None)
+ self.assertTrue(self._fetched_image is not None)
else:
self.assertTrue(self._fetched_image is None)
def test_snapshot_with_update_failure(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self._spawn_instance(True)
+ (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
self._update_image_raise_exception = True
- snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
+
+ self._mox.ReplayAll()
self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
self._context, self._instance_data, snapshot_name,
func_call_matcher.call)
+ self._mox.VerifyAll()
- # assert states changed in correct order
+ # Assert states changed in correct order
self.assertIsNone(func_call_matcher.match())
- # assert VM snapshots have been removed
- self.assertEquals(self._hypervutils.get_vm_snapshots_count(
- self._instance_data["name"]), 0)
-
- def test_snapshot(self):
+ def _setup_snapshot_mocks(self):
expected_calls = [
{'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ 'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs': {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}
+ ]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
- self._spawn_instance(True)
-
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
+
+ fake_hv_snapshot_path = 'fake_snapshot_path'
+ fake_parent_vhd_path = 'C:\\fake_vhd_path\\parent.vhd'
+
+ self._instance_data = self._get_instance_data()
+
+ func = mox.Func(self._check_instance_name)
+ m = vmutils.VMUtils.take_vm_snapshot(func)
+ m.AndReturn(fake_hv_snapshot_path)
+
+ m = vhdutils.VHDUtils.get_vhd_parent_path(mox.IsA(str))
+ m.AndReturn(fake_parent_vhd_path)
+
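+        # Capture the destination paths chosen by the driver so the
+        # reconnect/merge expectations below can assert the same paths are
+        # used.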
+ self._fake_dest_disk_path = None
+
+ def copy_dest_disk_path(src, dest):
+ self._fake_dest_disk_path = dest
+
+ m = shutil.copyfile(mox.IsA(str), mox.IsA(str))
+ m.WithSideEffects(copy_dest_disk_path)
+
+ self._fake_dest_base_disk_path = None
+
+ def copy_dest_base_disk_path(src, dest):
+ self._fake_dest_base_disk_path = dest
+
+ m = shutil.copyfile(fake_parent_vhd_path, mox.IsA(str))
+ m.WithSideEffects(copy_dest_base_disk_path)
+
+ def check_dest_disk_path(path):
+ return path == self._fake_dest_disk_path
+
+ def check_dest_base_disk_path(path):
+ return path == self._fake_dest_base_disk_path
+
+ func1 = mox.Func(check_dest_disk_path)
+ func2 = mox.Func(check_dest_base_disk_path)
+ # Make sure that the hyper-v base and differential VHDs are merged
+ vhdutils.VHDUtils.reconnect_parent_vhd(func1, func2)
+ vhdutils.VHDUtils.merge_vhd(func1, func2)
+
+ def check_snapshot_path(snapshot_path):
+ return snapshot_path == fake_hv_snapshot_path
+
+ # Make sure that the Hyper-V snapshot is removed
+ func = mox.Func(check_snapshot_path)
+ vmutils.VMUtils.remove_vm_snapshot(func)
+
+ shutil.rmtree(mox.IsA(str))
+
+ m = fake.PathUtils.open(func2, 'rb')
+ m.AndReturn(io.BytesIO(b'fake content'))
+
+ return (snapshot_name, func_call_matcher)
+
+ def test_snapshot(self):
+ (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
+
+ self._mox.ReplayAll()
self._conn.snapshot(self._context, self._instance_data, snapshot_name,
func_call_matcher.call)
+ self._mox.VerifyAll()
self.assertTrue(self._image_metadata and
- "disk_format" in self._image_metadata and
- self._image_metadata["disk_format"] == "vhd")
+ "disk_format" in self._image_metadata and
+ self._image_metadata["disk_format"] == "vhd")
- # assert states changed in correct order
+ # Assert states changed in correct order
self.assertIsNone(func_call_matcher.match())
- # assert VM snapshots have been removed
- self.assertEquals(self._hypervutils.get_vm_snapshots_count(
- self._instance_data["name"]), 0)
+ def _get_instance_data(self):
+ instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
+ return db_fakes.get_fake_instance_data(instance_name,
+ self._project_id,
+ self._user_id)
def _spawn_instance(self, cow, block_device_info=None):
self.flags(use_cow_images=cow)
- instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
-
- self._instance_data = db_fakes.get_fake_instance_data(instance_name,
- self._project_id, self._user_id)
+ self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
@@ -487,73 +691,216 @@ class HyperVAPITestCase(basetestcase.BaseTestCase):
network_info=network_info,
block_device_info=block_device_info)
- def _test_spawn_instance(self, cow):
- self._spawn_instance(cow)
+ def _add_ide_disk(self, vm_name, path, ctrller_addr,
+ drive_addr, drive_type):
+ if drive_type == constants.IDE_DISK:
+ self._instance_ide_disks.append(path)
+ elif drive_type == constants.IDE_DVD:
+ self._instance_ide_dvds.append(path)
+
+ def _add_volume_disk(self, vm_name, controller_path, address,
+ mounted_disk_path):
+ self._instance_volume_disks.append(mounted_disk_path)
- self.assertTrue(self._hypervutils.vm_exists(
- self._instance_data["name"]))
+ def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
+ with_exception=False,
+ block_device_info=None):
+ self._test_vm_name = None
- vmstate = self._hypervutils.get_vm_state(self._instance_data["name"])
- self.assertEquals(vmstate, constants.HYPERV_VM_STATE_ENABLED)
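+        # The driver generates the instance name; capture it from the first
+        # vm_exists() call so later expectations can match against it.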
+ def set_vm_name(vm_name):
+ self._test_vm_name = vm_name
- (vhd_paths, _, _) = self._hypervutils.get_vm_disks(
- self._instance_data["name"])
- self.assertEquals(len(vhd_paths), 1)
+ def check_vm_name(vm_name):
+ return vm_name == self._test_vm_name
+
+ m = vmutils.VMUtils.vm_exists(mox.IsA(str))
+ m.WithSideEffects(set_vm_name).AndReturn(False)
+
+ if not block_device_info:
+ m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(mox.IsA(str),
+ None)
+ m.AndReturn([])
+ else:
+ m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
+ mox.IsA(str), block_device_info)
+ m.AndReturn(True)
- parent_path = self._hypervutils.get_vhd_parent_path(vhd_paths[0])
if cow:
- self.assertTrue(not parent_path is None)
- self.assertEquals(self._fetched_image, parent_path)
+ def check_path(parent_path):
+ return parent_path == self._fetched_image
+
+ vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
+ mox.Func(check_path))
+
+ vmutils.VMUtils.create_vm(mox.Func(check_vm_name), mox.IsA(int),
+ mox.IsA(int), mox.IsA(bool))
+
+ if not block_device_info:
+ m = vmutils.VMUtils.attach_ide_drive(mox.Func(check_vm_name),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
+ m = vmutils.VMUtils.create_scsi_controller(mox.Func(check_vm_name))
+ m.InAnyOrder()
+
+ vmutils.VMUtils.create_nic(mox.Func(check_vm_name), mox.IsA(str),
+ mox.IsA(str)).InAnyOrder()
+
+ if setup_vif_mocks_func:
+ setup_vif_mocks_func()
+
+ # TODO(alexpilotti) Based on where the exception is thrown
+ # some of the above mock calls need to be skipped
+ if with_exception:
+ m = vmutils.VMUtils.vm_exists(mox.Func(check_vm_name))
+ m.AndReturn(True)
+
+ vmutils.VMUtils.destroy_vm(mox.Func(check_vm_name), True)
else:
- self.assertTrue(parent_path is None)
- self.assertEquals(self._fetched_image, vhd_paths[0])
+ vmutils.VMUtils.set_vm_state(mox.Func(check_vm_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def _test_spawn_instance(self, cow=True,
+ expected_ide_disks=1,
+ expected_ide_dvds=0,
+ setup_vif_mocks_func=None,
+ with_exception=False):
+ self._setup_spawn_instance_mocks(cow, setup_vif_mocks_func,
+ with_exception)
+
+ self._mox.ReplayAll()
+        self._spawn_instance(cow)
+ self._mox.VerifyAll()
+
+ self.assertEquals(len(self._instance_ide_disks), expected_ide_disks)
+ self.assertEquals(len(self._instance_ide_dvds), expected_ide_dvds)
+
+ if not cow:
+ self.assertEquals(self._fetched_image, self._instance_ide_disks[0])
+
+ def test_attach_volume(self):
+ instance_data = self._get_instance_data()
+ instance_name = instance_data['name']
- def _attach_volume(self):
- self._spawn_instance(True)
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
- self._conn.attach_volume(connection_info,
- self._instance_data, '/dev/sdc')
+ mount_point = '/dev/sdc'
- def test_attach_volume(self):
- self._attach_volume()
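+        # Expected workflow: log into the iSCSI target, resolve the target's
+        # device number to a mounted disk path, then attach that disk to a
+        # free slot on the instance's SCSI controller.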
+ volumeutils.VolumeUtils.login_storage_target(target_lun,
+ target_iqn,
+ target_portal)
+
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+ fake_controller_path = 'fake_scsi_controller_path'
+ fake_free_slot = 1
+
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
+
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
- (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
- self._instance_data["name"])
- self.assertEquals(len(volumes_paths), 1)
+ m = vmutils.VMUtils.get_vm_iscsi_controller(instance_name)
+ m.AndReturn(fake_controller_path)
- sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
- self._volume_id)
- self.assertTrue(sessions_exist)
+ m = vmutils.VMUtils.get_attached_disks_count(fake_controller_path)
+ m.AndReturn(fake_free_slot)
+
+ m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
+ fake_controller_path,
+ fake_free_slot,
+ fake_mounted_disk)
+ m.WithSideEffects(self._add_volume_disk)
+
+ self._mox.ReplayAll()
+ self._conn.attach_volume(connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
+
+ self.assertEquals(len(self._instance_volume_disks), 1)
def test_detach_volume(self):
- self._attach_volume()
+ instance_data = self._get_instance_data()
+ instance_name = instance_data['name']
+
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ mount_point = '/dev/sdc'
- self._conn.detach_volume(connection_info,
- self._instance_data, '/dev/sdc')
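+        # Detaching reverses the attach flow: resolve the mounted disk,
+        # detach it from the VM, then log out of the iSCSI target.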
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+ fake_free_slot = 1
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
- (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
- self._instance_data["name"])
- self.assertEquals(len(volumes_paths), 0)
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
- sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
- self._volume_id)
- self.assertFalse(sessions_exist)
+ vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)
+
+ volumeutils.VolumeUtils.logout_storage_target(mox.IsA(str))
+
+ self._mox.ReplayAll()
+ self._conn.detach_volume(connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
def test_boot_from_volume(self):
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
- self._spawn_instance(False, block_device_info)
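+        # When booting from a volume the iSCSI disk is attached to IDE
+        # controller 0, slot 0, instead of creating a local root VHD.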
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+ fake_controller_path = 'fake_scsi_controller_path'
+
+ volumeutils.VolumeUtils.login_storage_target(target_lun,
+ target_iqn,
+ target_portal)
+
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
- (_, volumes_paths, _) = self._hypervutils.get_vm_disks(
- self._instance_data["name"])
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
- self.assertEquals(len(volumes_paths), 1)
+ m = vmutils.VMUtils.get_vm_ide_controller(mox.IsA(str), mox.IsA(int))
+ m.AndReturn(fake_controller_path)
+
+ m = vmutils.VMUtils.attach_volume_to_controller(mox.IsA(str),
+ fake_controller_path,
+ 0,
+ fake_mounted_disk)
+ m.WithSideEffects(self._add_volume_disk)
+
+ self._setup_spawn_instance_mocks(cow=False,
+ block_device_info=block_device_info)
+
+ self._mox.ReplayAll()
+ self._spawn_instance(False, block_device_info)
+ self._mox.VerifyAll()
- sessions_exist = self._hypervutils.iscsi_volume_sessions_exist(
- self._volume_id)
- self.assertTrue(sessions_exist)
+ self.assertEquals(len(self._instance_volume_disks), 1)
diff --git a/nova/tests/test_imagebackend.py b/nova/tests/test_imagebackend.py
index 495e7c947..76fd1d5b6 100644
--- a/nova/tests/test_imagebackend.py
+++ b/nova/tests/test_imagebackend.py
@@ -55,8 +55,8 @@ class _ImageTestCase(object):
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
- os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
@@ -72,7 +72,9 @@ class _ImageTestCase(object):
def test_cache_image_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
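+        # cache() checks the template directory and template path in
+        # addition to the image path, so all three lookups must be recorded.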
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
@@ -82,8 +84,8 @@ class _ImageTestCase(object):
def test_cache_base_dir_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
- os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
@@ -98,8 +100,8 @@ class _ImageTestCase(object):
def test_cache_template_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
- os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
fn = self.mox.CreateMockAnything()
self.mox.ReplayAll()
@@ -195,7 +197,6 @@ class Qcow2TestCase(_ImageTestCase, test.TestCase):
def test_create_image_with_size(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(os.path, 'exists')
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE)
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 8142312b9..611519514 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -331,7 +331,6 @@ class ImageCacheManagerTestCase(test.TestCase):
base_file1 = os.path.join(base_dir, fingerprint)
base_file2 = os.path.join(base_dir, fingerprint + '_sm')
base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
- print res
self.assertTrue(res == [(base_file1, False, False),
(base_file2, True, False),
(base_file3, False, True)])
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index b70b96b7f..0829df2c6 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -21,8 +21,8 @@ from nova.compute import instance_types
from nova import context
from nova import db
from nova.db.sqlalchemy import models
-from nova.db.sqlalchemy import session as sql_session
from nova import exception
+from nova.openstack.common.db.sqlalchemy import session as sql_session
from nova.openstack.common import log as logging
from nova import test
@@ -359,6 +359,59 @@ class InstanceTypeTestCase(test.TestCase):
self.assertTrue(instance["instance_type"])
+class InstanceTypeToolsTest(test.TestCase):
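+    # system_metadata is persisted as rows of {'key': ..., 'value': ...}
+    # dicts; this helper converts a plain dict into that representation.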
+ def _dict_to_metadata(self, data):
+ return [{'key': key, 'value': value} for key, value in data.items()]
+
+ def _test_extract_instance_type(self, prefix):
+ instance_type = instance_types.get_default_instance_type()
+
+ metadata = {}
+ instance_types.save_instance_type_info(metadata, instance_type,
+ prefix)
+ instance = {'system_metadata': self._dict_to_metadata(metadata)}
+ _instance_type = instance_types.extract_instance_type(instance, prefix)
+
+ props = instance_types.system_metadata_instance_type_props.keys()
+ for key in instance_type.keys():
+ if key not in props:
+ del instance_type[key]
+
+ self.assertEqual(instance_type, _instance_type)
+
+ def test_extract_instance_type(self):
+ self._test_extract_instance_type('')
+
+ def test_extract_instance_type_prefix(self):
+ self._test_extract_instance_type('foo_')
+
+ def test_save_instance_type_info(self):
+ instance_type = instance_types.get_default_instance_type()
+
+ example = {}
+ example_prefix = {}
+
+ for key in instance_types.system_metadata_instance_type_props.keys():
+ example['instance_type_%s' % key] = instance_type[key]
+ example_prefix['fooinstance_type_%s' % key] = instance_type[key]
+
+ metadata = {}
+ instance_types.save_instance_type_info(metadata, instance_type)
+ self.assertEqual(example, metadata)
+
+ metadata = {}
+ instance_types.save_instance_type_info(metadata, instance_type, 'foo')
+ self.assertEqual(example_prefix, metadata)
+
+ def test_delete_instance_type_info(self):
+ instance_type = instance_types.get_default_instance_type()
+ metadata = {}
+ instance_types.save_instance_type_info(metadata, instance_type)
+ instance_types.save_instance_type_info(metadata, instance_type, '_')
+ instance_types.delete_instance_type_info(metadata, '', '_')
+ self.assertEqual(metadata, {})
+
+
class InstanceTypeFilteringTest(test.TestCase):
"""Test cases for the filter option available for instance_type_get_all."""
def setUp(self):
diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py
index c8f310303..95af25ebd 100644
--- a/nova/tests/test_iptables_network.py
+++ b/nova/tests/test_iptables_network.py
@@ -170,3 +170,22 @@ class IptablesManagerTestCase(test.TestCase):
self.assertTrue('[0:0] -A %s -j %s-%s' %
(chain, self.binary_name, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
+
+ def test_missing_table(self):
+ current_lines = []
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'],
+ table_name='filter')
+
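+        # Even with an empty rule set, _modify_rules must emit a complete
+        # table: the nova marker comments, the table declaration and a
+        # COMMIT.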
+ for line in ['*filter',
+ 'COMMIT']:
+            self.assertTrue(line in new_lines, "One of the iptables key "
+                            "lines went missing.")
+
+ self.assertTrue(len(new_lines) > 4, "No iptables rules added")
+
+ self.assertTrue("#Generated by nova" == new_lines[0] and
+ "*filter" == new_lines[1] and
+ "COMMIT" == new_lines[-2] and
+ "#Completed by nova" == new_lines[-1],
+ "iptables rules not generated in the correct order")
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index f96536893..c93bb0168 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -55,13 +55,12 @@ from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
+from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import utils as libvirt_utils
-from nova.virt.libvirt import volume
-from nova.virt.libvirt import volume_nfs
try:
@@ -133,306 +132,6 @@ class FakeVirtDomain(object):
return self._fake_dom_xml
-class LibvirtVolumeTestCase(test.TestCase):
-
- def setUp(self):
- super(LibvirtVolumeTestCase, self).setUp()
- self.executes = []
-
- def fake_execute(*cmd, **kwargs):
- self.executes.append(cmd)
- return None, None
-
- self.stubs.Set(utils, 'execute', fake_execute)
-
- class FakeLibvirtDriver(object):
- def __init__(self, hyperv="QEMU"):
- self.hyperv = hyperv
-
- def get_hypervisor_type(self):
- return self.hyperv
-
- def get_all_block_devices(self):
- return []
-
- self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
- self.connr = {
- 'ip': '127.0.0.1',
- 'initiator': 'fake_initiator',
- 'host': 'fake_host'
- }
-
- def test_libvirt_volume_driver_serial(self):
- libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
- name = 'volume-00000001'
- vol = {'id': 1, 'name': name}
- connection_info = {
- 'driver_volume_type': 'fake',
- 'data': {
- 'device_path': '/foo',
- },
- 'serial': 'fake_serial',
- }
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'block')
- self.assertEqual(tree.find('./serial').text, 'fake_serial')
-
- def iscsi_connection(self, volume, location, iqn):
- return {
- 'driver_volume_type': 'iscsi',
- 'data': {
- 'volume_id': volume['id'],
- 'target_portal': location,
- 'target_iqn': iqn,
- 'target_lun': 1,
- }
- }
-
- def test_libvirt_iscsi_driver(self):
- # NOTE(vish) exists is to make driver assume connecting worked
- self.stubs.Set(os.path, 'exists', lambda x: True)
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- location = '10.0.2.15:3260'
- name = 'volume-00000001'
- iqn = 'iqn.2010-10.org.openstack:%s' % name
- vol = {'id': 1, 'name': name}
- connection_info = self.iscsi_connection(vol, location, iqn)
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
- self.assertEqual(tree.get('type'), 'block')
- self.assertEqual(tree.find('./source').get('dev'), dev_str)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
- expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location),
- ('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location, '--login'),
- ('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location, '--op', 'update',
- '-n', 'node.startup', '-v', 'automatic'),
- ('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location, '--op', 'update',
- '-n', 'node.startup', '-v', 'manual'),
- ('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location, '--logout'),
- ('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location, '--op', 'delete')]
- self.assertEqual(self.executes, expected_commands)
-
- def test_libvirt_iscsi_driver_still_in_use(self):
- # NOTE(vish) exists is to make driver assume connecting worked
- self.stubs.Set(os.path, 'exists', lambda x: True)
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- location = '10.0.2.15:3260'
- name = 'volume-00000001'
- iqn = 'iqn.2010-10.org.openstack:%s' % name
- devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)]
- self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
- vol = {'id': 1, 'name': name}
- connection_info = self.iscsi_connection(vol, location, iqn)
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
- self.assertEqual(tree.get('type'), 'block')
- self.assertEqual(tree.find('./source').get('dev'), dev_str)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
- expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location),
- ('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location, '--login'),
- ('iscsiadm', '-m', 'node', '-T', iqn,
- '-p', location, '--op', 'update',
- '-n', 'node.startup', '-v', 'automatic')]
- self.assertEqual(self.executes, expected_commands)
-
- def sheepdog_connection(self, volume):
- return {
- 'driver_volume_type': 'sheepdog',
- 'data': {
- 'name': volume['name']
- }
- }
-
- def test_libvirt_sheepdog_driver(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- name = 'volume-00000001'
- vol = {'id': 1, 'name': name}
- connection_info = self.sheepdog_connection(vol)
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
- self.assertEqual(tree.find('./source').get('name'), name)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
-
- def rbd_connection(self, volume):
- return {
- 'driver_volume_type': 'rbd',
- 'data': {
- 'name': '%s/%s' % ('rbd', volume['name']),
- 'auth_enabled': CONF.rbd_secret_uuid is not None,
- 'auth_username': CONF.rbd_user,
- 'secret_type': 'ceph',
- 'secret_uuid': CONF.rbd_secret_uuid,
- }
- }
-
- def test_libvirt_rbd_driver(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- name = 'volume-00000001'
- vol = {'id': 1, 'name': name}
- connection_info = self.rbd_connection(vol)
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % ('rbd', name)
- self.assertEqual(tree.find('./source').get('name'), rbd_name)
- self.assertEqual(tree.find('./source/auth'), None)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
-
- def test_libvirt_rbd_driver_auth_enabled(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- name = 'volume-00000001'
- vol = {'id': 1, 'name': name}
- connection_info = self.rbd_connection(vol)
- uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
- user = 'foo'
- secret_type = 'ceph'
- connection_info['data']['auth_enabled'] = True
- connection_info['data']['auth_username'] = user
- connection_info['data']['secret_type'] = secret_type
- connection_info['data']['secret_uuid'] = uuid
-
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % ('rbd', name)
- self.assertEqual(tree.find('./source').get('name'), rbd_name)
- self.assertEqual(tree.find('./auth').get('username'), user)
- self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
- self.assertEqual(tree.find('./auth/secret').get('uuid'), uuid)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
-
- def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- name = 'volume-00000001'
- vol = {'id': 1, 'name': name}
- connection_info = self.rbd_connection(vol)
- uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
- user = 'foo'
- secret_type = 'ceph'
- connection_info['data']['auth_enabled'] = True
- connection_info['data']['auth_username'] = user
- connection_info['data']['secret_type'] = secret_type
- connection_info['data']['secret_uuid'] = uuid
-
- flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
- flags_user = 'bar'
- self.flags(rbd_user=flags_user,
- rbd_secret_uuid=flags_uuid)
-
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % ('rbd', name)
- self.assertEqual(tree.find('./source').get('name'), rbd_name)
- self.assertEqual(tree.find('./auth').get('username'), flags_user)
- self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
- self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
-
- def test_libvirt_rbd_driver_auth_disabled(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- name = 'volume-00000001'
- vol = {'id': 1, 'name': name}
- connection_info = self.rbd_connection(vol)
- uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
- user = 'foo'
- secret_type = 'ceph'
- connection_info['data']['auth_enabled'] = False
- connection_info['data']['auth_username'] = user
- connection_info['data']['secret_type'] = secret_type
- connection_info['data']['secret_uuid'] = uuid
-
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % ('rbd', name)
- self.assertEqual(tree.find('./source').get('name'), rbd_name)
- self.assertEqual(tree.find('./auth'), None)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
-
- def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- name = 'volume-00000001'
- vol = {'id': 1, 'name': name}
- connection_info = self.rbd_connection(vol)
- uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
- user = 'foo'
- secret_type = 'ceph'
- connection_info['data']['auth_enabled'] = False
- connection_info['data']['auth_username'] = user
- connection_info['data']['secret_type'] = secret_type
- connection_info['data']['secret_uuid'] = uuid
-
- # NOTE: Supplying the rbd_secret_uuid will enable authentication
- # locally in nova-compute even if not enabled in nova-volume/cinder
- flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
- flags_user = 'bar'
- self.flags(rbd_user=flags_user,
- rbd_secret_uuid=flags_uuid)
-
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % ('rbd', name)
- self.assertEqual(tree.find('./source').get('name'), rbd_name)
- self.assertEqual(tree.find('./auth').get('username'), flags_user)
- self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
- self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
-
- def test_libvirt_nfs_driver(self):
- # NOTE(vish) exists is to make driver assume connecting worked
- mnt_base = '/mnt'
- self.flags(nfs_mount_point_base=mnt_base)
-
- libvirt_driver = volume_nfs.NfsVolumeDriver(self.fake_conn)
- export_string = '192.168.1.1:/nfs/share1'
- name = 'volume-00001'
- export_mnt_base = os.path.join(mnt_base,
- libvirt_driver.get_hash_str(export_string))
- file_path = os.path.join(export_mnt_base, name)
-
- connection_info = {'data': {'export': export_string, 'name': name}}
- mount_device = "vde"
- conf = libvirt_driver.connect_volume(connection_info, mount_device)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'file')
- self.assertEqual(tree.find('./source').get('file'), file_path)
- libvirt_driver.disconnect_volume(connection_info, mount_device)
-
- expected_commands = [
- ('stat', export_mnt_base),
- ('mount', '-t', 'nfs', export_string, export_mnt_base)]
- self.assertEqual(self.executes, expected_commands)
-
-
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
@@ -574,7 +273,7 @@ class LibvirtConnTestCase(test.TestCase):
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.snapshots.libvirt_utils',
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def fake_extend(image, size):
@@ -658,9 +357,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
- None, None)
+ None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.apic, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
@@ -703,9 +404,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
cfg = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 2),
- None, None)
+ None, disk_info)
self.assertEquals(cfg.acpi, True)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
@@ -730,19 +433,46 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(type(cfg.devices[7]),
vconfig.LibvirtConfigGuestGraphics)
+ def test_get_guest_config_bug_1118829(self):
+ self.flags(libvirt_type='uml')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ disk_info = {'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {u'vda': {'bus': 'virtio',
+ 'type': 'disk',
+ 'dev': u'vda'},
+ 'root': {'bus': 'virtio',
+ 'type': 'disk',
+ 'dev': 'vda'}}}
+
+ # NOTE(jdg): For this specific test leave this blank
+ # This will exercise the failed code path still,
+ # and won't require fakes and stubs of the iscsi discovery
+ block_device_info = {}
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
+ None, block_device_info)
+ instance_ref = db.instance_get(self.context, instance_ref['id'])
+ self.assertEquals(instance_ref['root_device_name'], '/dev/vda')
+
def test_get_guest_config_with_root_device_name(self):
self.flags(libvirt_type='uml')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
- cfg = conn.get_guest_config(instance_ref, [], None, None,
- {'root_device_name': 'dev/vdb'})
+ block_device_info = {'root_device_name': '/dev/vdb'}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref,
+ block_device_info)
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
+ None, block_device_info)
self.assertEquals(cfg.acpi, False)
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
self.assertEquals(cfg.vcpus, 1)
self.assertEquals(cfg.os_type, "uml")
self.assertEquals(cfg.os_boot_dev, None)
- self.assertEquals(cfg.os_root, 'dev/vdb')
+ self.assertEquals(cfg.os_root, '/dev/vdb')
self.assertEquals(len(cfg.devices), 3)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -760,7 +490,10 @@ class LibvirtConnTestCase(test.TestCase):
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
- cfg = conn.get_guest_config(instance_ref, [], None, None, info)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref, info)
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
+ None, info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
self.assertEquals(cfg.devices[2].target_dev, 'vdc')
@@ -775,12 +508,13 @@ class LibvirtConnTestCase(test.TestCase):
# make configdrive.enabled_for() return True
instance_ref['config_drive'] = 'ANY_ID'
- cfg = conn.get_guest_config(instance_ref, [], None, None)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(type(cfg.devices[2]),
vconfig.LibvirtConfigGuestDisk)
- self.assertEquals(cfg.devices[2].target_dev,
- conn.default_last_device)
+ self.assertEquals(cfg.devices[2].target_dev, 'vdz')
def test_get_guest_config_with_vnc(self):
self.flags(libvirt_type='kvm',
@@ -791,7 +525,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
- cfg = conn.get_guest_config(instance_ref, [], None, None)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 5)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -815,7 +551,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
- cfg = conn.get_guest_config(instance_ref, [], None, None)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -844,7 +582,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
- cfg = conn.get_guest_config(instance_ref, [], None, None)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -873,7 +613,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
- cfg = conn.get_guest_config(instance_ref, [], None, None)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 6)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -902,7 +644,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
- cfg = conn.get_guest_config(instance_ref, [], None, None)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
self.assertEquals(len(cfg.devices), 8)
self.assertEquals(type(cfg.devices[0]),
vconfig.LibvirtConfigGuestDisk)
@@ -931,9 +675,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- None, None)
+ _fake_network_info(self.stubs, 1),
+ None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_kvm(self):
@@ -949,9 +695,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
- None, None)
+ None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
@@ -964,9 +712,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- None, None)
+ _fake_network_info(self.stubs, 1),
+ None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_default_lxc(self):
@@ -976,9 +726,11 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- None, None)
+ _fake_network_info(self.stubs, 1),
+ None, disk_info)
self.assertEquals(conf.cpu, None)
def test_get_guest_cpu_config_host_passthrough_new(self):
@@ -992,9 +744,11 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
- None, None)
+ None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-passthrough")
@@ -1011,9 +765,11 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
- None, None)
+ None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "host-model")
@@ -1031,9 +787,11 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
- None, None)
+ None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, "custom")
@@ -1049,11 +807,14 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-passthrough")
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
self.assertRaises(exception.NovaException,
conn.get_guest_config,
instance_ref,
_fake_network_info(self.stubs, 1),
- None, None)
+ None,
+ disk_info)
def test_get_guest_cpu_config_host_model_old(self):
def get_lib_version_stub(self):
@@ -1083,9 +844,11 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
self.flags(libvirt_cpu_mode="host-model")
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
- None, None)
+ None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
@@ -1107,9 +870,11 @@ class LibvirtConnTestCase(test.TestCase):
self.flags(libvirt_cpu_mode="custom")
self.flags(libvirt_cpu_model="Penryn")
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
conf = conn.get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
- None, None)
+ None, disk_info)
self.assertEquals(type(conf.cpu),
vconfig.LibvirtConfigGuestCPU)
self.assertEquals(conf.cpu.mode, None)
@@ -1874,14 +1639,16 @@ class LibvirtConnTestCase(test.TestCase):
conn.attach_volume,
{"driver_volume_type": "badtype"},
{"name": "fake-instance"},
- "/dev/fake")
+ "/dev/sda")
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _fake_network_info(self.stubs, 2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, instance_data)
- xml = conn.to_xml(instance_ref, network_info, None, False)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
interfaces = tree.findall("./devices/interface")
self.assertEquals(len(interfaces), 2)
@@ -1901,7 +1668,9 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(conn.uri, 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
- xml = conn.to_xml(instance_ref, network_info)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
check = [
@@ -1942,7 +1711,9 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
network_info = _fake_network_info(self.stubs, 1)
- xml = conn.to_xml(instance_ref, network_info)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ xml = conn.to_xml(instance_ref, network_info, disk_info)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
@@ -1975,8 +1746,10 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
- instance_ref, network_info, image_meta)
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@@ -1986,8 +1759,10 @@ class LibvirtConnTestCase(test.TestCase):
# The O_DIRECT availability is cached on first use in
# LibvirtDriver, hence we re-create it here
- xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
- instance_ref, network_info, image_meta)
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
for disk in disks:
@@ -1999,11 +1774,13 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
- instance_ref,
- network_info,
- image_meta,
- block_device_info=block_device_info)
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref,
+ block_device_info,
+ image_meta)
+ xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta,
+ block_device_info=block_device_info)
tree = etree.fromstring(xml)
got_disks = tree.findall('./devices/disk')
@@ -2026,8 +1803,10 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(user_context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
- xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
- instance_ref, network_info, image_meta)
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
@@ -2181,7 +1960,11 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(conn.uri, expected_uri)
network_info = _fake_network_info(self.stubs, 1)
- xml = conn.to_xml(instance_ref, network_info, None, rescue)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref,
+ rescue=rescue)
+ xml = conn.to_xml(instance_ref, network_info, disk_info,
+ rescue=rescue)
tree = etree.fromstring(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
@@ -2518,9 +2301,14 @@ class LibvirtConnTestCase(test.TestCase):
).AndReturn(vol['block_device_mapping'])
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
+ disk_info = {
+ 'bus': "scsi",
+ 'dev': v['mount_device'].rpartition("/")[2],
+ 'type': "disk"
+ }
conn.volume_driver_method('connect_volume',
- v['connection_info'],
- v['mount_device'].rpartition("/")[2])
+ v['connection_info'],
+ disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
@@ -2546,10 +2334,14 @@ class LibvirtConnTestCase(test.TestCase):
# Creating mocks
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
+ disk_info = {
+ 'bus': "scsi",
+ 'dev': v['mount_device'].rpartition("/")[2],
+ 'type': "disk"
+ }
conn.volume_driver_method('connect_volume',
v['connection_info'],
- v['mount_device'].
- rpartition("/")[2])
+ disk_info)
self.mox.StubOutWithMock(conn, 'plug_vifs')
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.ReplayAll()
@@ -2561,7 +2353,7 @@ class LibvirtConnTestCase(test.TestCase):
migrate_data)
self.assertEqual(ret, None)
self.assertTrue(os.path.exists('%s/%s/' %
- (tmpdir, inst_ref.name)))
+ (tmpdir, inst_ref['name'])))
db.instance_destroy(self.context, inst_ref['uuid'])
def test_pre_block_migration_works_correctly(self):
@@ -2883,42 +2675,6 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.UnsetStubs()
- def test_volume_in_mapping(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- swap = {'device_name': '/dev/sdb',
- 'swap_size': 1}
- ephemerals = [{'num': 0,
- 'virtual_name': 'ephemeral0',
- 'device_name': '/dev/sdc1',
- 'size': 1},
- {'num': 2,
- 'virtual_name': 'ephemeral2',
- 'device_name': '/dev/sdd',
- 'size': 1}]
- block_device_mapping = [{'mount_device': '/dev/sde',
- 'device_path': 'fake_device'},
- {'mount_device': '/dev/sdf',
- 'device_path': 'fake_device'}]
- block_device_info = {
- 'root_device_name': '/dev/sda',
- 'swap': swap,
- 'ephemerals': ephemerals,
- 'block_device_mapping': block_device_mapping}
-
- def _assert_volume_in_mapping(device_name, true_or_false):
- self.assertEquals(conn._volume_in_mapping(device_name,
- block_device_info),
- true_or_false)
-
- _assert_volume_in_mapping('sda', False)
- _assert_volume_in_mapping('sdb', True)
- _assert_volume_in_mapping('sdc1', True)
- _assert_volume_in_mapping('sdd', True)
- _assert_volume_in_mapping('sde', True)
- _assert_volume_in_mapping('sdf', True)
- _assert_volume_in_mapping('sdg', False)
- _assert_volume_in_mapping('sdh1', False)
-
def test_immediate_delete(self):
def fake_lookup_by_name(instance_name):
raise exception.InstanceNotFound(instance_id=instance_name)
@@ -3833,7 +3589,6 @@ class IptablesFirewallTestCase(test.TestCase):
if '*filter' in lines:
self.out6_rules = lines
return '', ''
- print cmd, kwargs
network_model = _fake_network_info(self.stubs, 1, spectacular=True)
@@ -3849,7 +3604,7 @@ class IptablesFirewallTestCase(test.TestCase):
in_rules = filter(lambda l: not l.startswith('#'),
self.in_rules)
for rule in in_rules:
- if not 'nova' in rule:
+ if 'nova' not in rule:
self.assertTrue(rule in self.out_rules,
'Rule went missing: %s' % rule)
@@ -4250,7 +4005,7 @@ class LibvirtUtilsTestCase(test.TestCase):
def test_pick_disk_driver_name(self):
type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
- 'xen': ([True, 'phy'], [False, 'file'], [None, 'file']),
+ 'xen': ([True, 'phy'], [False, 'tap'], [None, 'tap']),
'uml': ([True, None], [False, None], [None, None]),
'lxc': ([True, None], [False, None], [None, None])}
@@ -4474,7 +4229,7 @@ class LibvirtDriverTestCase(test.TestCase):
self.counter = 0
- def fake_get_instance_disk_info(instance):
+ def fake_get_instance_disk_info(instance, xml=None):
return '[]'
def fake_destroy(instance):
@@ -4519,7 +4274,7 @@ class LibvirtDriverTestCase(test.TestCase):
'disk_size':'83886080'}]
disk_info_text = jsonutils.dumps(disk_info)
- def fake_get_instance_disk_info(instance):
+ def fake_get_instance_disk_info(instance, xml=None):
return disk_info_text
def fake_destroy(instance):
@@ -4593,16 +4348,18 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_extend(path, size):
pass
- def fake_to_xml(instance, network_info, image_meta=None, rescue=None,
+ def fake_to_xml(instance, network_info, disk_info,
+ image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
return ""
def fake_plug_vifs(instance, network_info):
pass
- def fake_create_image(context, inst, libvirt_xml, suffix='',
- disk_images=None, network_info=None,
- block_device_info=None):
+ def fake_create_image(context, inst, libvirt_xml,
+ disk_mapping, suffix='',
+ disk_images=None, network_info=None,
+ block_device_info=None):
pass
def fake_create_domain(xml, instance=None):
@@ -4660,7 +4417,8 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_get_info(instance):
return {'state': power_state.RUNNING}
- def fake_to_xml(instance, network_info, image_meta=None, rescue=None,
+ def fake_to_xml(instance, network_info, disk_info,
+ image_meta=None, rescue=None,
block_device_info=None):
return ""
diff --git a/nova/tests/test_libvirt_blockinfo.py b/nova/tests/test_libvirt_blockinfo.py
new file mode 100644
index 000000000..e8d548399
--- /dev/null
+++ b/nova/tests/test_libvirt_blockinfo.py
@@ -0,0 +1,427 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 OpenStack LLC
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import block_device
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+import nova.tests.image.fake
+from nova.virt.libvirt import blockinfo
+
+
+class LibvirtBlockInfoTest(test.TestCase):
+
+ def setUp(self):
+ super(LibvirtBlockInfoTest, self).setUp()
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.get_admin_context()
+ nova.tests.image.fake.stub_out_image_service(self.stubs)
+ self.test_instance = {
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': '5'} # m1.small
+
+ def test_volume_in_mapping(self):
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'num': 0,
+ 'virtual_name': 'ephemeral0',
+ 'device_name': '/dev/sdc1',
+ 'size': 1},
+ {'num': 2,
+ 'virtual_name': 'ephemeral2',
+ 'device_name': '/dev/sdd',
+ 'size': 1}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'},
+ {'mount_device': '/dev/sdf',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ def _assert_volume_in_mapping(device_name, true_or_false):
+ self.assertEquals(
+ block_device.volume_in_mapping(device_name,
+ block_device_info),
+ true_or_false)
+
+ _assert_volume_in_mapping('sda', False)
+ _assert_volume_in_mapping('sdb', True)
+ _assert_volume_in_mapping('sdc1', True)
+ _assert_volume_in_mapping('sdd', True)
+ _assert_volume_in_mapping('sde', True)
+ _assert_volume_in_mapping('sdf', True)
+ _assert_volume_in_mapping('sdg', False)
+ _assert_volume_in_mapping('sdh1', False)
+
+ def test_find_disk_dev(self):
+ mapping = {
+ "disk.local": {
+ 'dev': 'sda',
+ 'bus': 'scsi',
+ 'type': 'disk',
+ },
+ "disk.swap": {
+ 'dev': 'sdc',
+ 'bus': 'scsi',
+ 'type': 'disk',
+ },
+ }
+
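+ # 'sda' is already taken in the mapping above, so the first free
+ # scsi name is 'sdb'; last_device asks for the final name in the
+ # bus namespace ('sdz'), and a bus with no entries starts from its
+ # first name ('vda').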
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
+ self.assertEqual(dev, 'sdb')
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
+ last_device=True)
+ self.assertEqual(dev, 'sdz')
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
+ self.assertEqual(dev, 'vda')
+
+ def test_get_next_disk_dev(self):
+ mapping = {}
+ mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
+ 'virtio')
+ self.assertEqual(mapping['disk.local'],
+ {'dev': 'vda', 'bus': 'virtio', 'type': 'disk'})
+
+ mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
+ 'virtio')
+ self.assertEqual(mapping['disk.swap'],
+ {'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'})
+
+ mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
+ 'ide',
+ 'cdrom',
+ True)
+ self.assertEqual(mapping['disk.config'],
+ {'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'})
+
+ def test_get_disk_mapping_simple(self):
+ # The simplest possible disk mapping setup, all defaults
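+ # ('root' is an alias entry for whichever device the image boots
+ # from, here the same virtio disk exposed as 'disk'.)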
+
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_simple_rootdev(self):
+ # A simple disk mapping setup, but with custom root device name
+
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+ block_device_info = {
+ 'root_device_name': '/dev/sda'
+ }
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'root': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_rescue(self):
+ # A simple disk mapping setup, but in rescue mode
+
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ rescue=True)
+
+ expect = {
+ 'disk.rescue': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_simple_iso(self):
+ # A simple disk mapping setup, but with an ISO for the root device
+
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+ image_meta = {'disk_format': 'iso'}
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ None,
+ image_meta)
+
+ expect = {
+ 'disk': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'root': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_simple_swap(self):
+ # A simple disk mapping setup, but with a swap device added
+
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+ instance_ref['instance_type']['swap'] = 5
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_simple_configdrive(self):
+ # A simple disk mapping setup, but with configdrive added
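+ # (the config drive is allocated at the end of the bus namespace,
+ # hence 'vdz', presumably so it stays clear of normally-allocated
+ # disks)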
+
+ self.flags(force_config_drive=True)
+
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_ephemeral(self):
+ # A disk mapping with ephemeral devices
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+ instance_ref['instance_type']['swap'] = 5
+
+ block_device_info = {
+ 'ephemerals': [
+ {'num': 0, 'virtual_name': 'ephemeral0',
+ 'device_name': '/dev/vdb', 'size': 10},
+ {'num': 1, 'virtual_name': 'ephemeral1',
+ 'device_name': '/dev/vdc', 'size': 10},
+ {'num': 2, 'virtual_name': 'ephemeral2',
+ 'device_name': '/dev/vdd', 'size': 10},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.eph1': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_custom_swap(self):
+ # A disk mapping with a swap device at position vdb. This
+ # should cause disk.local to be removed
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ block_device_info = {
+ 'swap': {'device_name': '/dev/vdb',
+ 'swap_size': 10},
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_blockdev_root(self):
+ # A disk mapping with a blockdev replacing the default root
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_blockdev_eph(self):
+ # A disk mapping with a blockdev replacing the ephemeral device
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdb",
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_blockdev_many(self):
+ # A disk mapping with a blockdev replacing all devices
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdb",
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdc",
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_mapping_complex(self):
+ # The strangest possible disk mapping setup
+ user_context = context.RequestContext(self.user_id, self.project_id)
+ instance_ref = db.instance_create(user_context, self.test_instance)
+
+ block_device_info = {
+ 'root_device_name': '/dev/vdf',
+ 'swap': {'device_name': '/dev/vdy',
+ 'swap_size': 10},
+ 'ephemerals': [
+ {'num': 0, 'virtual_name': 'ephemeral0',
+ 'device_name': '/dev/vdb', 'size': 10},
+ {'num': 1, 'virtual_name': 'ephemeral1',
+ 'device_name': '/dev/vdc', 'size': 10},
+ ],
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk'},
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.eph1': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk'}
+ }
+ self.assertEqual(mapping, expect)
+
+ def test_get_disk_bus(self):
+ bus = blockinfo.get_disk_bus_for_device_type('kvm')
+ self.assertEqual(bus, 'virtio')
+
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ device_type='cdrom')
+ self.assertEqual(bus, 'ide')
+
+ image_meta = {'properties': {'disk_bus': 'scsi'}}
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ image_meta)
+ self.assertEqual(bus, 'scsi')
+
+ image_meta = {'properties': {'disk_bus': 'usb',
+ 'cdrom_bus': 'scsi'}}
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ image_meta,
+ device_type='cdrom')
+ self.assertEqual(bus, 'scsi')
+
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ image_meta)
+ self.assertEqual(bus, 'usb')
+
+ image_meta = {'properties': {'disk_bus': 'xen'}}
+ self.assertRaises(exception.NovaException,
+ blockinfo.get_disk_bus_for_device_type,
+ 'kvm',
+ image_meta)
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index 3861d7dfa..916b961da 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -16,8 +16,11 @@
from lxml import etree
+from nova import exception
+from nova.network import model as network_model
from nova.openstack.common import cfg
from nova import test
+from nova.tests import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
@@ -42,13 +45,37 @@ class LibvirtVifTestCase(test.TestCase):
'id': 'network-id-xxx-yyy-zzz'
}
+ net_bridge_quantum = {
+ 'cidr': '101.168.1.0/24',
+ 'cidr_v6': '101:1db9::/64',
+ 'gateway_v6': '101:1db9::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge_interface': 'eth0',
+ 'vlan': 99,
+ 'gateway': '101.168.1.1',
+ 'broadcast': '101.168.1.255',
+ 'dns1': '8.8.8.8',
+ 'id': 'network-id-xxx-yyy-zzz'
+ }
+
mapping_bridge = {
'mac': 'ca:fe:de:ad:be:ef',
'gateway_v6': net_bridge['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
- 'vif_devname': 'tap-xxx-yyy-zzz'
+ 'vif_devname': 'tap-xxx-yyy-zzz',
+ 'vif_type': network_model.VIF_TYPE_BRIDGE,
+ }
+
+ mapping_bridge_quantum = {
+ 'mac': 'ca:fe:de:ad:be:ef',
+ 'gateway_v6': net_bridge['gateway_v6'],
+ 'ips': [{'ip': '101.168.1.9'}],
+ 'dhcp_server': '191.168.1.1',
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz',
}
net_ovs = {
@@ -72,9 +99,62 @@ class LibvirtVifTestCase(test.TestCase):
'dhcp_server': '191.168.1.1',
'vif_uuid': 'vif-xxx-yyy-zzz',
'vif_devname': 'tap-xxx-yyy-zzz',
+ 'vif_type': network_model.VIF_TYPE_OVS,
'ovs_interfaceid': 'aaa-bbb-ccc',
}
+ mapping_ovs_legacy = {
+ 'mac': 'ca:fe:de:ad:be:ef',
+ 'gateway_v6': net_ovs['gateway_v6'],
+ 'ips': [{'ip': '101.168.1.9'}],
+ 'dhcp_server': '191.168.1.1',
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ }
+
+ net_8021 = {
+ 'cidr': '101.168.1.0/24',
+ 'cidr_v6': '101:1db9::/64',
+ 'gateway_v6': '101:1db9::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'interface': 'eth0',
+ 'vlan': 99,
+ 'gateway': '101.168.1.1',
+ 'broadcast': '101.168.1.255',
+ 'dns1': '8.8.8.8',
+ 'id': 'network-id-xxx-yyy-zzz'
+ }
+
+ mapping_8021qbh = {
+ 'mac': 'ca:fe:de:ad:be:ef',
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz',
+ 'vif_type': network_model.VIF_TYPE_802_QBH,
+ 'qbh_params': network_model.VIF8021QbhParams(
+ profileid="xxx-yyy-zzz"),
+ }
+
+ mapping_8021qbg = {
+ 'mac': 'ca:fe:de:ad:be:ef',
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz',
+ 'vif_type': network_model.VIF_TYPE_802_QBG,
+ 'qbg_params': network_model.VIF8021QbgParams(
+ managerid="xxx-yyy-zzz",
+ typeid="aaa-bbb-ccc",
+ typeidversion="1",
+ instanceid="ddd-eee-fff")
+ }
+
+ mapping_none = {
+ 'mac': 'ca:fe:de:ad:be:ef',
+ 'gateway_v6': net_bridge['gateway_v6'],
+ 'ips': [{'ip': '101.168.1.9'}],
+ 'dhcp_server': '191.168.1.1',
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz',
+ }
+
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid'
@@ -149,7 +229,10 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(libvirt_use_virtio_for_bridges=False,
libvirt_type='kvm')
- d = vif.LibvirtBridgeDriver()
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -168,7 +251,10 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
- d = vif.LibvirtBridgeDriver()
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -187,7 +273,10 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='qemu')
- d = vif.LibvirtBridgeDriver()
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -206,7 +295,10 @@ class LibvirtVifTestCase(test.TestCase):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='xen')
- d = vif.LibvirtBridgeDriver()
+ def get_connection():
+ return fakelibvirt.Connection("xen:///system",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
self.net_bridge,
self.mapping_bridge)
@@ -221,11 +313,19 @@ class LibvirtVifTestCase(test.TestCase):
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
- def test_bridge_driver(self):
- d = vif.LibvirtBridgeDriver()
- xml = self._get_instance_xml(d,
- self.net_bridge,
- self.mapping_bridge)
+ def test_generic_driver_none(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
+ self.assertRaises(exception.NovaException,
+ self._get_instance_xml,
+ d,
+ self.net_bridge,
+ self.mapping_none)
+
+ def _check_bridge_driver(self, d, net, mapping, br_want):
+ xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
@@ -233,15 +333,45 @@ class LibvirtVifTestCase(test.TestCase):
node = ret[0]
self.assertEqual(node.get("type"), "bridge")
br_name = node.find("source").get("bridge")
- self.assertEqual(br_name, self.net_bridge['bridge'])
+ self.assertEqual(br_name, br_want)
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping_bridge['mac'])
- def test_ovs_ethernet_driver(self):
- d = vif.LibvirtOpenVswitchDriver()
- xml = self._get_instance_xml(d,
- self.net_ovs,
- self.mapping_ovs)
+ def test_bridge_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtBridgeDriver(get_connection)
+ self._check_bridge_driver(d,
+ self.net_bridge,
+ self.mapping_bridge,
+ self.net_bridge['bridge'])
+
+ def test_generic_driver_bridge(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
+ self._check_bridge_driver(d,
+ self.net_bridge,
+ self.mapping_bridge,
+ self.net_bridge['bridge'])
+
+ def test_quantum_bridge_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.QuantumLinuxBridgeVIFDriver(get_connection)
+ br_want = 'brq' + self.net_bridge_quantum['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self._check_bridge_driver(d,
+ self.net_bridge_quantum,
+ self.mapping_bridge_quantum,
+ br_want)
+
+ def _check_ovs_ethernet_driver(self, d, net, mapping):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
@@ -255,11 +385,30 @@ class LibvirtVifTestCase(test.TestCase):
script = node.find("script").get("path")
self.assertEquals(script, "")
- def test_ovs_virtualport_driver(self):
- d = vif.LibvirtOpenVswitchVirtualPortDriver()
- xml = self._get_instance_xml(d,
- self.net_ovs,
- self.mapping_ovs)
+ def test_ovs_ethernet_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False,
+ 9010)
+ d = vif.LibvirtOpenVswitchDriver(get_connection)
+ self._check_ovs_ethernet_driver(d,
+ self.net_ovs,
+ self.mapping_ovs_legacy)
+
+ def test_generic_ovs_ethernet_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False,
+ 9010)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
+ self._check_ovs_ethernet_driver(d,
+ self.net_ovs,
+ self.mapping_ovs)
+
+ def _check_ovs_virtualport_driver(self, d, net, mapping, want_iface_id):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
@@ -270,49 +419,162 @@ class LibvirtVifTestCase(test.TestCase):
br_name = node.find("source").get("bridge")
self.assertEqual(br_name, "br0")
mac = node.find("mac").get("address")
- self.assertEqual(mac, self.mapping_ovs['mac'])
+ self.assertEqual(mac, mapping['mac'])
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
- self.assertEqual(iface_id,
- self.mapping_ovs['ovs_interfaceid'])
+ self.assertEqual(iface_id, want_iface_id)
iface_id_found = True
self.assertTrue(iface_id_found)
- def test_quantum_bridge_ethernet_driver(self):
- d = vif.QuantumLinuxBridgeVIFDriver()
- xml = self._get_instance_xml(d,
- self.net_bridge,
- self.mapping_bridge)
+ def test_ovs_virtualport_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False,
+ 9011)
+ d = vif.LibvirtOpenVswitchVirtualPortDriver(get_connection)
+ want_iface_id = 'vif-xxx-yyy-zzz'
+ self._check_ovs_virtualport_driver(d,
+ self.net_ovs,
+ self.mapping_ovs_legacy,
+ want_iface_id)
+
+ def test_generic_ovs_virtualport_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False,
+ 9011)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
+ want_iface_id = self.mapping_ovs['ovs_interfaceid']
+ self._check_ovs_virtualport_driver(d,
+ self.net_ovs,
+ self.mapping_ovs,
+ want_iface_id)
+
+ def _check_quantum_hybrid_driver(self, d, net, mapping, br_want):
+ self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
+ xml = self._get_instance_xml(d, net, mapping)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
self.assertEqual(node.get("type"), "bridge")
- dev_name = node.find("target").get("dev")
- self.assertTrue(dev_name.startswith("tap"))
- mac = node.find("mac").get("address")
- self.assertEqual(mac, self.mapping_ovs['mac'])
br_name = node.find("source").get("bridge")
- self.assertEqual(br_name, "br0")
+ self.assertEqual(br_name, br_want)
+ mac = node.find("mac").get("address")
+ self.assertEqual(mac, mapping['mac'])
def test_quantum_hybrid_driver(self):
- d = vif.LibvirtHybridOVSBridgeDriver()
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ br_want = "qbr" + self.mapping_ovs['vif_uuid']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ d = vif.LibvirtHybridOVSBridgeDriver(get_connection)
+ self._check_quantum_hybrid_driver(d,
+ self.net_ovs,
+ self.mapping_ovs_legacy,
+ br_want)
+
+ def test_generic_hybrid_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
+ br_want = "qbr" + self.mapping_ovs['vif_uuid']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self._check_quantum_hybrid_driver(d,
+ self.net_ovs,
+ self.mapping_ovs,
+ br_want)
+
+ def test_generic_8021qbh_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
xml = self._get_instance_xml(d,
- self.net_ovs,
- self.mapping_ovs)
+ self.net_8021,
+ self.mapping_8021qbh)
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
node = ret[0]
- self.assertEqual(node.get("type"), "bridge")
- br_name = node.find("source").get("bridge")
- self.assertEqual(br_name, self.net_ovs['bridge'])
+ self.assertEqual(node.get("type"), "direct")
+
+ br_name = node.find("source").get("dev")
+ self.assertEqual(br_name, "eth0")
mac = node.find("mac").get("address")
- self.assertEqual(mac, self.mapping_ovs['mac'])
+ self.assertEqual(mac, self.mapping_8021qbh['mac'])
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "802.1Qbh")
+ profile_id_found = False
+ for p_elem in vp.findall("parameters"):
+ wantparams = self.mapping_8021qbh['qbh_params']
+ profile_id = p_elem.get("profileid", None)
+ if profile_id:
+ self.assertEqual(profile_id,
+ wantparams['profileid'])
+ profile_id_found = True
+
+ self.assertTrue(profile_id_found)
+
+ def test_generic_8021qbg_driver(self):
+ def get_connection():
+ return fakelibvirt.Connection("qemu:///session",
+ False)
+ d = vif.LibvirtGenericVIFDriver(get_connection)
+ xml = self._get_instance_xml(d,
+ self.net_8021,
+ self.mapping_8021qbg)
+
+ doc = etree.fromstring(xml)
+ ret = doc.findall('./devices/interface')
+ self.assertEqual(len(ret), 1)
+ node = ret[0]
+ self.assertEqual(node.get("type"), "direct")
+
+ br_name = node.find("source").get("dev")
+ self.assertEqual(br_name, "eth0")
+ mac = node.find("mac").get("address")
+ self.assertEqual(mac, self.mapping_8021qbg['mac'])
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "802.1Qbg")
+ manager_id_found = False
+ type_id_found = False
+ typeversion_id_found = False
+ instance_id_found = False
+ for p_elem in vp.findall("parameters"):
+ wantparams = self.mapping_8021qbg['qbg_params']
+ manager_id = p_elem.get("managerid", None)
+ type_id = p_elem.get("typeid", None)
+ typeversion_id = p_elem.get("typeidversion", None)
+ instance_id = p_elem.get("instanceid", None)
+ if manager_id:
+ self.assertEqual(manager_id,
+ wantparams['managerid'])
+ manager_id_found = True
+ if type_id:
+ self.assertEqual(type_id,
+ wantparams['typeid'])
+ type_id_found = True
+ if typeversion_id:
+ self.assertEqual(typeversion_id,
+ wantparams['typeidversion'])
+ typeversion_id_found = True
+ if instance_id:
+ self.assertEqual(instance_id,
+ wantparams['instanceid'])
+ instance_id_found = True
+
+ self.assertTrue(manager_id_found)
+ self.assertTrue(type_id_found)
+ self.assertTrue(typeversion_id_found)
+ self.assertTrue(instance_id_found)
diff --git a/nova/tests/test_libvirt_volume.py b/nova/tests/test_libvirt_volume.py
new file mode 100644
index 000000000..b9f95735f
--- /dev/null
+++ b/nova/tests/test_libvirt_volume.py
@@ -0,0 +1,366 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 OpenStack LLC
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova.openstack.common import cfg
+from nova import test
+from nova import utils
+from nova.virt.libvirt import volume
+
+CONF = cfg.CONF
+
+
+class LibvirtVolumeTestCase(test.TestCase):
+
+ def setUp(self):
+ super(LibvirtVolumeTestCase, self).setUp()
+ self.executes = []
+
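+ # Record every command the driver shells out to, so the tests
+ # below can assert the exact iscsiadm/mount sequences.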
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ class FakeLibvirtDriver(object):
+ def __init__(self, hyperv="QEMU"):
+ self.hyperv = hyperv
+
+ def get_hypervisor_type(self):
+ return self.hyperv
+
+ def get_all_block_devices(self):
+ return []
+
+ self.fake_conn = FakeLibvirtDriver()
+ self.connr = {
+ 'ip': '127.0.0.1',
+ 'initiator': 'fake_initiator',
+ 'host': 'fake_host'
+ }
+
+ def test_libvirt_volume_driver_serial(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ },
+ 'serial': 'fake_serial',
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./serial').text, 'fake_serial')
+
+ def iscsi_connection(self, volume, location, iqn):
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_iqn': iqn,
+ 'target_lun': 1,
+ }
+ }
+
+ def test_libvirt_iscsi_driver(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ location = '10.0.2.15:3260'
+ name = 'volume-00000001'
+ iqn = 'iqn.2010-10.org.openstack:%s' % name
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), dev_str)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location),
+ ('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location, '--login'),
+ ('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'automatic'),
+ ('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'manual'),
+ ('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location, '--logout'),
+ ('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location, '--op', 'delete')]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_iscsi_driver_still_in_use(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ location = '10.0.2.15:3260'
+ name = 'volume-00000001'
+ iqn = 'iqn.2010-10.org.openstack:%s' % name
+ devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)]
+ self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), dev_str)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
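+ # Another device on the same iSCSI session is still attached, so
+ # disconnect must skip the --logout and --op delete steps.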
+ expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location),
+ ('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location, '--login'),
+ ('iscsiadm', '-m', 'node', '-T', iqn,
+ '-p', location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'automatic')]
+ self.assertEqual(self.executes, expected_commands)
+
+ def sheepdog_connection(self, volume):
+ return {
+ 'driver_volume_type': 'sheepdog',
+ 'data': {
+ 'name': volume['name']
+ }
+ }
+
+ def test_libvirt_sheepdog_driver(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ connection_info = self.sheepdog_connection(vol)
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
+ self.assertEqual(tree.find('./source').get('name'), name)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def rbd_connection(self, volume):
+ return {
+ 'driver_volume_type': 'rbd',
+ 'data': {
+ 'name': '%s/%s' % ('rbd', volume['name']),
+ 'auth_enabled': CONF.rbd_secret_uuid is not None,
+ 'auth_username': CONF.rbd_user,
+ 'secret_type': 'ceph',
+ 'secret_uuid': CONF.rbd_secret_uuid,
+ }
+ }
+
+ def test_libvirt_rbd_driver(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ connection_info = self.rbd_connection(vol)
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % ('rbd', name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+ self.assertEqual(tree.find('./source/auth'), None)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_enabled(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ connection_info = self.rbd_connection(vol)
+ uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ user = 'foo'
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = True
+ connection_info['data']['auth_username'] = user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = uuid
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % ('rbd', name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+ self.assertEqual(tree.find('./auth').get('username'), user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ connection_info = self.rbd_connection(vol)
+ uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ user = 'foo'
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = True
+ connection_info['data']['auth_username'] = user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = uuid
+
+ flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
+ flags_user = 'bar'
+ self.flags(rbd_user=flags_user,
+ rbd_secret_uuid=flags_uuid)
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % ('rbd', name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+ self.assertEqual(tree.find('./auth').get('username'), flags_user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_disabled(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ connection_info = self.rbd_connection(vol)
+ uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ user = 'foo'
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = False
+ connection_info['data']['auth_username'] = user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = uuid
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % ('rbd', name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+ self.assertEqual(tree.find('./auth'), None)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ vol = {'id': 1, 'name': name}
+ connection_info = self.rbd_connection(vol)
+ uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ user = 'foo'
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = False
+ connection_info['data']['auth_username'] = user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = uuid
+
+ # NOTE: Supplying the rbd_secret_uuid will enable authentication
+ # locally in nova-compute even if not enabled in nova-volume/cinder
+ flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
+ flags_user = 'bar'
+ self.flags(rbd_user=flags_user,
+ rbd_secret_uuid=flags_uuid)
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % ('rbd', name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+ self.assertEqual(tree.find('./auth').get('username'), flags_user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_nfs_driver(self):
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base)
+
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ export_string = '192.168.1.1:/nfs/share1'
+ name = 'volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ libvirt_driver.get_hash_str(export_string))
+ file_path = os.path.join(export_mnt_base, name)
+
+ connection_info = {'data': {'export': export_string, 'name': name}}
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.connect_volume(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'file')
+ self.assertEqual(tree.find('./source').get('file'), file_path)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('stat', export_mnt_base),
+ ('mount', '-t', 'nfs', export_string, export_mnt_base)]
+ self.assertEqual(self.executes, expected_commands)
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index c2f0b5a11..827bfb398 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -20,6 +20,8 @@
import base64
import copy
+import hashlib
+import hmac
import json
import re
@@ -461,15 +463,19 @@ class MetadataHandlerTestCase(test.TestCase):
expected_instance_id = 'a-b-c-d'
def fake_get_metadata(instance_id, remote_address):
- if instance_id == expected_instance_id:
+ if remote_address is None:
+ raise Exception('Expected X-Forwarded-For header')
+ elif instance_id == expected_instance_id:
return self.mdinst
else:
# raise the exception to aid with 500 response code test
raise Exception("Expected instance_id of %s, got %s" %
(expected_instance_id, instance_id))
- signed = ('d98d0dd53b026a24df2c06b464ffa5da'
- 'db922ae41af7bd3ecc3cae75aef65771')
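+ # Derive the expected signature the same way the metadata proxy
+ # does: HMAC-SHA256 over the instance id, keyed with the shared
+ # secret.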
+ signed = hmac.new(
+ CONF.quantum_metadata_proxy_shared_secret,
+ expected_instance_id,
+ hashlib.sha256).hexdigest()
# try a request with service disabled
response = fake_request(
@@ -481,39 +487,57 @@ class MetadataHandlerTestCase(test.TestCase):
self.assertEqual(response.status_int, 200)
# now enable the service
-
self.flags(service_quantum_metadata_proxy=True)
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Instance-ID': 'a-b-c-d',
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body,
base64.b64decode(self.instance['user_data']))
+ # mismatched signature
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Instance-ID': 'a-b-c-d',
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
'X-Instance-ID-Signature': ''})
self.assertEqual(response.status_int, 403)
+ # without X-Forwarded-For
response = fake_request(
self.stubs, self.mdinst,
relpath="/2009-04-04/user-data",
address="192.192.192.2",
fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Instance-ID': 'z-z-z-z',
- 'X-Instance-ID-Signature': '81f42e3fc77ba3a3e8d83142746e0'
- '8387b96cbc5bd2474665192d2ec28'
- '8ffb67'})
+ headers={'X-Instance-ID': 'a-b-c-d',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 500)
+
+ # unexpected Instance-ID
+ signed = hmac.new(
+ CONF.quantum_metadata_proxy_shared_secret,
+ 'z-z-z-z',
+ hashlib.sha256).hexdigest()
+
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'z-z-z-z',
+ 'X-Instance-ID-Signature': signed})
self.assertEqual(response.status_int, 500)
@@ -525,6 +549,7 @@ class MetadataPasswordTestCase(test.TestCase):
self.instance = copy.copy(INSTANCES[0])
self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
address=None, sgroups=None)
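+ # Use the local conductor so the db stubs below are exercised
+ # directly instead of going through RPC.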
+ self.flags(use_local=True, group='conductor')
def test_get_password(self):
request = webob.Request.blank('')
@@ -542,8 +567,16 @@ class MetadataPasswordTestCase(test.TestCase):
request = webob.Request.blank('')
request.method = 'POST'
request.body = val
- self.stubs.Set(db, 'instance_system_metadata_update',
- lambda *a, **kw: None)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ lambda *a, **kw: {'system_metadata': []})
+
+ def fake_instance_update(context, uuid, updates):
+ self.assertIn('system_metadata', updates)
+ self.assertIn('password_0', updates['system_metadata'])
+ return self.instance, self.instance
+
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ fake_instance_update)
password.handle_password(request, self.mdinst)
def test_set_password(self):
diff --git a/nova/tests/test_migration_utils.py b/nova/tests/test_migration_utils.py
new file mode 100644
index 000000000..45b6d86d4
--- /dev/null
+++ b/nova/tests/test_migration_utils.py
@@ -0,0 +1,126 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset import UniqueConstraint
+from sqlalchemy import MetaData, Table, Column, Integer, BigInteger
+
+from nova.db.sqlalchemy import utils
+from nova import exception
+from nova.tests import test_migrations
+
+
+class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
+ """Class for testing utils that are used in db migrations."""
+
+ def test_utils_drop_unique_constraint(self):
+ table_name = "__test_tmp_table__"
+ uc_name = 'uniq_foo'
+ values = [
+ {'id': 1, 'a': 3, 'foo': 10},
+ {'id': 2, 'a': 2, 'foo': 20},
+ {'id': 3, 'a': 1, 'foo': 30}
+ ]
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ test_table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True,
+ nullable=False),
+ Column('a', Integer),
+ Column('foo', Integer),
+ UniqueConstraint('a', name='uniq_a'),
+ UniqueConstraint('foo', name=uc_name))
+ test_table.create()
+
+ engine.execute(test_table.insert(), values)
+ # NOTE(boris-42): This method is generic UC dropper.
+ utils.drop_unique_constraint(engine, table_name, uc_name, 'foo')
+
+ s = test_table.select().order_by(test_table.c.id)
+ rows = engine.execute(s).fetchall()
+
+ for i in xrange(0, len(values)):
+ v = values[i]
+ self.assertEqual((v['id'], v['a'], v['foo']), rows[i])
+
+ # NOTE(boris-42): Update data about Table from DB.
+ meta = MetaData()
+ meta.bind = engine
+ test_table = Table(table_name, meta, autoload=True)
+ constraints = filter(lambda c: c.name == uc_name,
+ test_table.constraints)
+ self.assertEqual(len(constraints), 0)
+ self.assertEqual(len(test_table.constraints), 1)
+
+ test_table.drop()
+
+ def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self):
+ table_name = "__test_tmp_table__"
+ uc_name = 'uniq_foo'
+ values = [
+ {'id': 1, 'a': 3, 'foo': 10},
+ {'id': 2, 'a': 2, 'foo': 20},
+ {'id': 3, 'a': 1, 'foo': 30}
+ ]
+
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ test_table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True,
+ nullable=False),
+ Column('a', Integer),
+ Column('foo', BigInteger, default=0),
+ UniqueConstraint('a', name='uniq_a'),
+ UniqueConstraint('foo', name=uc_name))
+ test_table.create()
+
+ engine.execute(test_table.insert(), values)
+ if key == "sqlite":
+ # NOTE(boris-42): Missing info about column `foo` that has
+ # unsupported type BigInteger.
+ self.assertRaises(exception.NovaException,
+ utils.drop_unique_constraint,
+ engine, table_name, uc_name, 'foo')
+
+ # NOTE(boris-42): Wrong type of foo instance. it should be
+ # instance of sqlalchemy.Column.
+ self.assertRaises(exception.NovaException,
+ utils.drop_unique_constraint,
+ engine, table_name, uc_name, 'foo',
+ foo=Integer())
+
+ foo = Column('foo', BigInteger, default=0)
+ utils.drop_unique_constraint(engine, table_name, uc_name, 'foo',
+ foo=foo)
+
+ s = test_table.select().order_by(test_table.c.id)
+ rows = engine.execute(s).fetchall()
+
+ for i in xrange(0, len(values)):
+ v = values[i]
+ self.assertEqual((v['id'], v['a'], v['foo']), rows[i])
+
+ # NOTE(boris-42): Update data about Table from DB.
+ meta = MetaData()
+ meta.bind = engine
+ test_table = Table(table_name, meta, autoload=True)
+ constraints = filter(lambda c: c.name == uc_name,
+ test_table.constraints)
+ self.assertEqual(len(constraints), 0)
+ self.assertEqual(len(test_table.constraints), 1)
+ test_table.drop()
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index f0ed0a863..a0c5db9c4 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -22,10 +22,28 @@ to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly both upgrading and downgrading, and that no data loss occurs
if possible.
+
+There are also "opportunistic" tests for both mysql and postgresql in here,
+which allows testing against all 3 databases (sqlite in memory, mysql, pg) in
+a properly configured unit test environment.
+
+For the opportunistic testing you need to set up a db named 'openstack_citest'
+with user 'openstack_citest' and password 'openstack_citest' on localhost.
+The test will then use that db and u/p combo to run the tests.
+
+For postgres on Ubuntu this can be done with the following commands:
+
+sudo -u postgres psql
+postgres=# create user openstack_citest with createdb login password
+ 'openstack_citest';
+postgres=# create database openstack_citest with owner openstack_citest;
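+
+For mysql, a roughly equivalent setup (a sketch; adjust host and privileges
+to your installation) is:
+
+mysql> create database openstack_citest;
+mysql> grant all privileges on openstack_citest.* to
+ 'openstack_citest'@'localhost' identified by 'openstack_citest';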
+
"""
+import collections
import commands
import ConfigParser
+import datetime
import os
import urlparse
@@ -36,6 +54,7 @@ import nova.db.migration as migration
import nova.db.sqlalchemy.migrate_repo
from nova.db.sqlalchemy.migration import versioning_api as migration_api
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
from nova import test
@@ -52,6 +71,8 @@ def _get_connect_string(backend,
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
+ elif backend == "mysql":
+ backend = "mysql+mysqldb"
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% locals())
@@ -87,8 +108,18 @@ def _have_mysql():
return present.lower() in ('', 'true')
-class TestMigrations(test.TestCase):
- """Test sqlalchemy-migrate migrations."""
+def get_table(engine, name):
+ """Returns an sqlalchemy table dynamically from db.
+
+ Needed because the models don't work for us in migrations
+ as models will be far out of sync with the current data."""
+ metadata = sqlalchemy.schema.MetaData()
+ metadata.bind = engine
+ return sqlalchemy.Table(name, metadata, autoload=True)
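+
+# Illustrative use (the table name is assumed to exist at the schema
+# version under test):
+# instances = get_table(engine, 'instances')
+# rows = engine.execute(instances.select()).fetchall()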
+
+
+class BaseMigrationTestCase(test.TestCase):
+ """Base class fort testing migrations and migration utils."""
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
@@ -101,18 +132,18 @@ class TestMigrations(test.TestCase):
os.path.abspath(os.path.dirname(MIGRATE_FILE)))
def setUp(self):
- super(TestMigrations, self).setUp()
+ super(BaseMigrationTestCase, self).setUp()
self.snake_walk = False
self.test_databases = {}
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
- LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
- if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
+ LOG.debug('config_path is %s' % BaseMigrationTestCase.CONFIG_FILE_PATH)
+ if os.path.exists(BaseMigrationTestCase.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
- cp.read(TestMigrations.CONFIG_FILE_PATH)
+ cp.read(BaseMigrationTestCase.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
@@ -136,7 +167,7 @@ class TestMigrations(test.TestCase):
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
- super(TestMigrations, self).tearDown()
+ super(BaseMigrationTestCase, self).tearDown()
def _reset_databases(self):
def execute_cmd(cmd=None):
@@ -183,11 +214,12 @@ class TestMigrations(test.TestCase):
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
- # note(boris-42): This file is used for authentication
- # without password prompt.
- createpgpass = ("echo '*:*:*:%(user)s:%(password)s' > "
- "~/.pgpass && chmod 0600 ~/.pgpass" % locals())
- execute_cmd(createpgpass)
+ # note(krtaylor): creating a .pgpass file proved unreliable for
+ # tests run inside a venv, so authentication uses the PGPASSWORD
+ # environment variable instead, which is no longer planned for
+ # deprecation
+ os.environ['PGPASSWORD'] = password
+ os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
# drop database which we have connected to, so for such
# operations there is a special database template1.
@@ -199,6 +231,12 @@ class TestMigrations(test.TestCase):
sql = ("create database %(database)s;") % locals()
createtable = sqlcmd % locals()
execute_cmd(createtable)
+ os.unsetenv('PGPASSWORD')
+ os.unsetenv('PGUSER')
+
+
+class TestMigrations(BaseMigrationTestCase):
+ """Test sqlalchemy-migrate migrations."""
def test_walk_versions(self):
"""
@@ -227,19 +265,11 @@ class TestMigrations(test.TestCase):
self.engines["mysqlcitest"] = engine
self.test_databases["mysqlcitest"] = connect_string
- # Test that we end in an innodb
- self._check_mysql_innodb(engine)
- # Test IP transition
- self._check_mysql_migration_149(engine)
-
- def _check_mysql_innodb(self, engine):
# build a fully populated mysql database with all the tables
self._reset_databases()
self._walk_versions(engine, False, False)
- uri = _get_connect_string("mysql", database="information_schema")
- connection = sqlalchemy.create_engine(uri).connect()
-
+ connection = engine.connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
@@ -253,91 +283,30 @@ class TestMigrations(test.TestCase):
"and TABLE_NAME!='migrate_version'")
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
+ connection.close()
- def test_migration_149_postgres(self):
- """Test updating a table with IPAddress columns."""
- if not _is_backend_avail('postgres'):
- self.skipTest("postgres not available")
+ def test_postgresql_connect_fail(self):
+ """
+        Test that a postgres connection failure is triggered and handled
+        gracefully, so that we don't break people without postgres.
+ """
+ if _is_backend_avail('postgresql', user="openstack_cifail"):
+ self.fail("Shouldn't have connected")
+ def test_postgresql_opportunistically(self):
+ # Test postgresql database migration walk
+ if not _is_backend_avail('postgres'):
+ self.skipTest("postgresql not available")
+        # add this to the global lists so reset works with it; it's removed
+        # automatically in tearDown, so there is no need to clean it up here.
connect_string = _get_connect_string("postgres")
engine = sqlalchemy.create_engine(connect_string)
+ self.engines["postgresqlcitest"] = engine
+ self.test_databases["postgresqlcitest"] = connect_string
- self.engines["postgrescitest"] = engine
- self.test_databases["postgrescitest"] = connect_string
-
- self._reset_databases()
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
-
- connection = engine.connect()
-
- self._migrate_up(engine, 148)
- IPS = ("127.0.0.1", "255.255.255.255", "2001:db8::1:2", "::1")
- connection.execute("INSERT INTO provider_fw_rules "
- " (protocol, from_port, to_port, cidr)"
- "VALUES ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s')" % IPS)
- self.assertEqual('character varying',
- connection.execute(
- "SELECT data_type FROM INFORMATION_SCHEMA.COLUMNS "
- "WHERE table_name='provider_fw_rules' "
- "AND table_catalog='openstack_citest' "
- "AND column_name='cidr'").scalar())
-
- self._migrate_up(engine, 149)
- self.assertEqual(IPS,
- tuple(tup[0] for tup in connection.execute(
- "SELECT cidr from provider_fw_rules").fetchall()))
- self.assertEqual('inet',
- connection.execute(
- "SELECT data_type FROM INFORMATION_SCHEMA.COLUMNS "
- "WHERE table_name='provider_fw_rules' "
- "AND table_catalog='openstack_citest' "
- "AND column_name='cidr'").scalar())
- connection.close()
-
- def _check_mysql_migration_149(self, engine):
- """Test updating a table with IPAddress columns."""
+ # build a fully populated postgresql database with all the tables
self._reset_databases()
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
-
- uri = _get_connect_string("mysql", database="openstack_citest")
- connection = sqlalchemy.create_engine(uri).connect()
-
- self._migrate_up(engine, 148)
-
- IPS = ("127.0.0.1", "255.255.255.255", "2001:db8::1:2", "::1")
- connection.execute("INSERT INTO provider_fw_rules "
- " (protocol, from_port, to_port, cidr)"
- "VALUES ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s'), "
- " ('tcp', 1234, 1234, '%s')" % IPS)
- self.assertEqual('varchar(255)',
- connection.execute(
- "SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS "
- "WHERE table_name='provider_fw_rules' "
- "AND table_schema='openstack_citest' "
- "AND column_name='cidr'").scalar())
-
- connection.close()
-
- self._migrate_up(engine, 149)
-
- connection = sqlalchemy.create_engine(uri).connect()
-
- self.assertEqual(IPS,
- tuple(tup[0] for tup in connection.execute(
- "SELECT cidr from provider_fw_rules").fetchall()))
- self.assertEqual('varchar(39)',
- connection.execute(
- "SELECT column_type FROM INFORMATION_SCHEMA.COLUMNS "
- "WHERE table_name='provider_fw_rules' "
- "AND table_schema='openstack_citest' "
- "AND column_name='cidr'").scalar())
+ self._walk_versions(engine, False, False)
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
@@ -360,7 +329,7 @@ class TestMigrations(test.TestCase):
for version in xrange(migration.INIT_VERSION + 2,
TestMigrations.REPOSITORY.latest + 1):
# upgrade -> downgrade -> upgrade
- self._migrate_up(engine, version)
+ self._migrate_up(engine, version, with_data=True)
if snake_walk:
self._migrate_down(engine, version)
self._migrate_up(engine, version)
@@ -385,176 +354,280 @@ class TestMigrations(test.TestCase):
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
- def _migrate_up(self, engine, version):
- migration_api.upgrade(engine,
- TestMigrations.REPOSITORY,
- version)
- self.assertEqual(version,
- migration_api.db_version(engine,
- TestMigrations.REPOSITORY))
+ def _migrate_up(self, engine, version, with_data=False):
+ """migrate up to a new version of the db.
- def test_migration_146(self):
- name = 'name'
- az = 'custom_az'
-
- def _145_check():
- agg = aggregates.select(aggregates.c.id == 1).execute().first()
- self.assertEqual(name, agg.name)
- self.assertEqual(az, agg.availability_zone)
-
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 145)
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
- aggregates = sqlalchemy.Table('aggregates', metadata,
- autoload=True)
-
- aggregates.insert().values(id=1, availability_zone=az,
- aggregate_name=1, name=name).execute()
-
- _145_check()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
-
- aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
- metadata, autoload=True)
- metadata = aggregate_metadata.select(aggregate_metadata.c.
- aggregate_id == 1).execute().first()
- self.assertEqual(az, metadata['value'])
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 145)
- _145_check()
-
- def test_migration_147(self):
+        We allow for data insertion and post-checks at every
+        migration version with special _prerun_### and
+        _check_### functions in the main test case.
+ """
+ # NOTE(sdague): try block is here because it's impossible to debug
+ # where a failed data migration happens otherwise
+ try:
+ if with_data:
+ data = None
+ prerun = getattr(self, "_prerun_%d" % version, None)
+ if prerun:
+ data = prerun(engine)
+
+ migration_api.upgrade(engine,
+ TestMigrations.REPOSITORY,
+ version)
+ self.assertEqual(
+ version,
+ migration_api.db_version(engine,
+ TestMigrations.REPOSITORY))
+
+ if with_data:
+ check = getattr(self, "_check_%d" % version, None)
+ if check:
+ check(engine, data)
+ except Exception:
+ LOG.error("Failed to migrate to version %s on engine %s" %
+ (version, engine))
+ raise
+
+ def _prerun_134(self, engine):
+ now = timeutils.utcnow()
+ data = [{
+ 'id': 1,
+ 'uuid': '1d739808-d7ec-4944-b252-f8363e119755',
+ 'mac': '00:00:00:00:00:01',
+ 'start_period': now,
+ 'last_refreshed': now + datetime.timedelta(seconds=10),
+ 'bw_in': 100000,
+ 'bw_out': 200000,
+ }, {
+ 'id': 2,
+ 'uuid': '1d739808-d7ec-4944-b252-f8363e119756',
+ 'mac': '2a:f2:48:31:c1:60',
+ 'start_period': now,
+ 'last_refreshed': now + datetime.timedelta(seconds=20),
+ 'bw_in': 1000000000,
+ 'bw_out': 200000000,
+ }, {
+ 'id': 3,
+            # This uuid is intentionally the same as in the row above.
+ 'uuid': '1d739808-d7ec-4944-b252-f8363e119756',
+ 'mac': '00:00:00:00:00:02',
+ 'start_period': now,
+ 'last_refreshed': now + datetime.timedelta(seconds=30),
+ 'bw_in': 0,
+ 'bw_out': 0,
+ }]
+
+ bw_usage_cache = get_table(engine, 'bw_usage_cache')
+ engine.execute(bw_usage_cache.insert(), data)
+ return data
+
+ def _check_134(self, engine, data):
+ bw_usage_cache = get_table(engine, 'bw_usage_cache')
+
+        # Checks if both columns have been successfully created.
+ self.assertIn('last_ctr_in', bw_usage_cache.c)
+ self.assertIn('last_ctr_out', bw_usage_cache.c)
+
+ # Checks if all rows have been inserted.
+ bw_items = bw_usage_cache.select().execute().fetchall()
+ self.assertEqual(len(bw_items), 3)
+
+ bw = bw_usage_cache.select(
+ bw_usage_cache.c.id == 1).execute().first()
+
+ # New columns have 'NULL' as default value.
+ self.assertEqual(bw['last_ctr_in'], None)
+ self.assertEqual(bw['last_ctr_out'], None)
+
+ self.assertEqual(data[0]['mac'], bw['mac'])
+
+ # migration 146, availability zone transition
+ def _prerun_146(self, engine):
+ data = {
+ 'availability_zone': 'custom_az',
+ 'name': 'name',
+ }
+
+ aggregates = get_table(engine, 'aggregates')
+ result = aggregates.insert().values(data).execute()
+ # NOTE(sdague) it's important you don't insert keys by value in
+ # postgresql, because its autoincrement counter won't get updated
+ data['id'] = result.inserted_primary_key[0]
+ return data
+
+ def _check_146(self, engine, data):
+ aggregate_md = get_table(engine, 'aggregate_metadata')
+ md = aggregate_md.select(
+ aggregate_md.c.aggregate_id == data['id']).execute().first()
+ self.assertEqual(data['availability_zone'], md['value'])
+
+ # migration 147, availability zone transition for services
+ def _prerun_147(self, engine):
az = 'test_zone'
host1 = 'compute-host1'
host2 = 'compute-host2'
-
- def _146_check():
- service = services.select(services.c.id == 1).execute().first()
- self.assertEqual(az, service.availability_zone)
- self.assertEqual(host1, service.host)
- service = services.select(services.c.id == 2).execute().first()
- self.assertNotEqual(az, service.availability_zone)
- service = services.select(services.c.id == 3).execute().first()
- self.assertEqual(az, service.availability_zone)
- self.assertEqual(host2, service.host)
-
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 146)
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
-
- #populate service table
- services = sqlalchemy.Table('services', metadata,
- autoload=True)
- services.insert().values(id=1, host=host1,
- binary='nova-compute', topic='compute', report_count=0,
- availability_zone=az).execute()
- services.insert().values(id=2, host='sched-host',
- binary='nova-scheduler', topic='scheduler', report_count=0,
- availability_zone='ignore_me').execute()
- services.insert().values(id=3, host=host2,
- binary='nova-compute', topic='compute', report_count=0,
- availability_zone=az).execute()
-
- _146_check()
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 147)
-
- # check aggregate metadata
- aggregate_metadata = sqlalchemy.Table('aggregate_metadata',
- metadata, autoload=True)
- aggregate_hosts = sqlalchemy.Table('aggregate_hosts',
- metadata, autoload=True)
- metadata = aggregate_metadata.select(aggregate_metadata.c.
- aggregate_id == 1).execute().first()
- self.assertEqual(az, metadata['value'])
- self.assertEqual(aggregate_hosts.select(
- aggregate_hosts.c.aggregate_id == 1).execute().
- first().host, host1)
- blank = [h for h in aggregate_hosts.select(
- aggregate_hosts.c.aggregate_id == 2).execute()]
- self.assertEqual(blank, [])
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 146)
-
- _146_check()
-
- def test_migration_152(self):
+        # NOTE: the aggregate this migration creates gets id == 2,
+        # because _prerun_146 already inserted aggregate id == 1
+ data = [
+ {'id': 1, 'host': host1,
+ 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': az},
+ {'id': 2, 'host': 'sched-host',
+ 'binary': 'nova-scheduler', 'topic': 'scheduler',
+ 'report_count': 0, 'availability_zone': 'ignore_me'},
+ {'id': 3, 'host': host2,
+ 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': az},
+ ]
+
+ services = get_table(engine, 'services')
+ engine.execute(services.insert(), data)
+ return data
+
+ def _check_147(self, engine, data):
+ aggregate_md = get_table(engine, 'aggregate_metadata')
+ aggregate_hosts = get_table(engine, 'aggregate_hosts')
+ # NOTE(sdague): hard coded to id == 2, because we added to
+ # aggregate_metadata previously
+ for item in data:
+ md = aggregate_md.select(
+ aggregate_md.c.aggregate_id == 2).execute().first()
+ if item['binary'] == "nova-compute":
+ self.assertEqual(item['availability_zone'], md['value'])
+
+ host = aggregate_hosts.select(
+ aggregate_hosts.c.aggregate_id == 2
+ ).execute().first()
+ self.assertEqual(host['host'], data[0]['host'])
+
+        # NOTE(sdague): aggregate id 3 was never created, so the
+        # lookup should come back empty
+ host = aggregate_hosts.select(
+ aggregate_hosts.c.aggregate_id == 3
+ ).execute().first()
+ self.assertEqual(host, None)
+
+ # migration 149, changes IPAddr storage format
+ def _prerun_149(self, engine):
+ provider_fw_rules = get_table(engine, 'provider_fw_rules')
+ data = [
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "127.0.0.1"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "255.255.255.255"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "2001:db8::1:2"},
+ {'protocol': 'tcp', 'from_port': 1234,
+ 'to_port': 1234, 'cidr': "::1"}
+ ]
+ engine.execute(provider_fw_rules.insert(), data)
+ return data
+
+ def _check_149(self, engine, data):
+ provider_fw_rules = get_table(engine, 'provider_fw_rules')
+ result = provider_fw_rules.select().execute()
+
+ iplist = map(lambda x: x['cidr'], data)
+
+ for row in result:
+ self.assertIn(row['cidr'], iplist)
+
+ # migration 152 - convert deleted from boolean to int
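+    # (deleted rows get deleted == id; live rows get 0 for Integer ids
+    # and '' for String ids, as _check_152 asserts below)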
+ def _prerun_152(self, engine):
host1 = 'compute-host1'
host2 = 'compute-host2'
-
- def _151_check(services, volumes):
- service = services.select(services.c.id == 1).execute().first()
- self.assertEqual(False, service.deleted)
- service = services.select(services.c.id == 2).execute().first()
- self.assertEqual(True, service.deleted)
-
- volume = volumes.select(volumes.c.id == "first").execute().first()
- self.assertEqual(False, volume.deleted)
- volume = volumes.select(volumes.c.id == "second").execute().first()
- self.assertEqual(True, volume.deleted)
-
- for key, engine in self.engines.items():
- migration_api.version_control(engine, TestMigrations.REPOSITORY,
- migration.INIT_VERSION)
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 151)
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
-
- # NOTE(boris-42): It is enough to test one table with type of `id`
- # column Integer and one with type String.
- services = sqlalchemy.Table('services', metadata, autoload=True)
- volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
-
- engine.execute(
- services.insert(),
- [
- {'id': 1, 'host': host1, 'binary': 'nova-compute',
- 'report_count': 0, 'topic': 'compute', 'deleted': False},
- {'id': 2, 'host': host1, 'binary': 'nova-compute',
- 'report_count': 0, 'topic': 'compute', 'deleted': True}
- ]
- )
-
- engine.execute(
- volumes.insert(),
- [
- {'id': 'first', 'host': host1, 'deleted': False},
- {'id': 'second', 'host': host2, 'deleted': True}
- ]
- )
-
- _151_check(services, volumes)
-
- migration_api.upgrade(engine, TestMigrations.REPOSITORY, 152)
- # NOTE(boris-42): One more time get from DB info about tables.
- metadata2 = sqlalchemy.schema.MetaData()
- metadata2.bind = engine
-
- services = sqlalchemy.Table('services', metadata2, autoload=True)
-
- service = services.select(services.c.id == 1).execute().first()
- self.assertEqual(0, service.deleted)
- service = services.select(services.c.id == 2).execute().first()
- self.assertEqual(service.id, service.deleted)
-
- volumes = sqlalchemy.Table('volumes', metadata2, autoload=True)
- volume = volumes.select(volumes.c.id == "first").execute().first()
- self.assertEqual("", volume.deleted)
- volume = volumes.select(volumes.c.id == "second").execute().first()
- self.assertEqual(volume.id, volume.deleted)
-
- migration_api.downgrade(engine, TestMigrations.REPOSITORY, 151)
- # NOTE(boris-42): One more time get from DB info about tables.
- metadata = sqlalchemy.schema.MetaData()
- metadata.bind = engine
- services = sqlalchemy.Table('services', metadata, autoload=True)
- volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
-
- _151_check(services, volumes)
+ # NOTE(sdague): start at #4 because services data already in table
+ # from 147
+ services_data = [
+ {'id': 4, 'host': host1, 'binary': 'nova-compute',
+ 'report_count': 0, 'topic': 'compute', 'deleted': False},
+ {'id': 5, 'host': host1, 'binary': 'nova-compute',
+ 'report_count': 0, 'topic': 'compute', 'deleted': True}
+ ]
+ volumes_data = [
+ {'id': 'first', 'host': host1, 'deleted': False},
+ {'id': 'second', 'host': host2, 'deleted': True}
+ ]
+
+ services = get_table(engine, 'services')
+ engine.execute(services.insert(), services_data)
+
+ volumes = get_table(engine, 'volumes')
+ engine.execute(volumes.insert(), volumes_data)
+ return dict(services=services_data, volumes=volumes_data)
+
+ def _check_152(self, engine, data):
+ services = get_table(engine, 'services')
+ service = services.select(services.c.id == 4).execute().first()
+ self.assertEqual(0, service.deleted)
+ service = services.select(services.c.id == 5).execute().first()
+ self.assertEqual(service.id, service.deleted)
+
+ volumes = get_table(engine, 'volumes')
+ volume = volumes.select(volumes.c.id == "first").execute().first()
+ self.assertEqual("", volume.deleted)
+ volume = volumes.select(volumes.c.id == "second").execute().first()
+ self.assertEqual(volume.id, volume.deleted)
+
+ # migration 153, copy flavor information into system_metadata
+ def _prerun_153(self, engine):
+ fake_types = [
+ dict(id=10, name='type1', memory_mb=128, vcpus=1,
+ root_gb=10, ephemeral_gb=0, flavorid="1", swap=0,
+ rxtx_factor=1.0, vcpu_weight=1, disabled=False,
+ is_public=True),
+ dict(id=11, name='type2', memory_mb=512, vcpus=1,
+ root_gb=10, ephemeral_gb=5, flavorid="2", swap=0,
+ rxtx_factor=1.5, vcpu_weight=2, disabled=False,
+ is_public=True),
+ dict(id=12, name='type3', memory_mb=128, vcpus=1,
+ root_gb=10, ephemeral_gb=0, flavorid="3", swap=0,
+ rxtx_factor=1.0, vcpu_weight=1, disabled=False,
+ is_public=False),
+ dict(id=13, name='type4', memory_mb=128, vcpus=1,
+ root_gb=10, ephemeral_gb=0, flavorid="4", swap=0,
+ rxtx_factor=1.0, vcpu_weight=1, disabled=True,
+ is_public=True),
+ dict(id=14, name='type5', memory_mb=128, vcpus=1,
+ root_gb=10, ephemeral_gb=0, flavorid="5", swap=0,
+ rxtx_factor=1.0, vcpu_weight=1, disabled=True,
+ is_public=False),
+ ]
+
+ fake_instances = [
+ dict(uuid='m153-uuid1', instance_type_id=10),
+ dict(uuid='m153-uuid2', instance_type_id=11),
+ dict(uuid='m153-uuid3', instance_type_id=12),
+ dict(uuid='m153-uuid4', instance_type_id=13),
+ # NOTE(danms): no use of type5
+ ]
+
+ instances = get_table(engine, 'instances')
+ instance_types = get_table(engine, 'instance_types')
+ engine.execute(instance_types.insert(), fake_types)
+ engine.execute(instances.insert(), fake_instances)
+
+ return fake_types, fake_instances
+
+ def _check_153(self, engine, data):
+ fake_types, fake_instances = data
+ # NOTE(danms): Fetch all the tables and data from scratch after change
+ instances = get_table(engine, 'instances')
+ instance_types = get_table(engine, 'instance_types')
+ sys_meta = get_table(engine, 'instance_system_metadata')
+
+ # Collect all system metadata, indexed by instance_uuid
+ metadata = collections.defaultdict(dict)
+ for values in sys_meta.select().execute():
+ metadata[values['instance_uuid']][values['key']] = values['value']
+
+ # Taken from nova/compute/api.py
+ instance_type_props = ['id', 'name', 'memory_mb', 'vcpus',
+ 'root_gb', 'ephemeral_gb', 'flavorid',
+ 'swap', 'rxtx_factor', 'vcpu_weight']
+
+ for instance in fake_instances:
+ inst_sys_meta = metadata[instance['uuid']]
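+            # fake type ids start at 10 (see _prerun_153), so offset
+            # back to index into fake_types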
+ inst_type = fake_types[instance['instance_type_id'] - 10]
+ for prop in instance_type_props:
+ prop_name = 'instance_type_%s' % prop
+ self.assertIn(prop_name, inst_sys_meta)
+ self.assertEqual(str(inst_sys_meta[prop_name]),
+ str(inst_type[prop]))
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
index a300028a0..aec6c8f67 100644
--- a/nova/tests/test_notifications.py
+++ b/nova/tests/test_notifications.py
@@ -187,8 +187,6 @@ class NotificationsTestCase(test.TestCase):
params = {"task_state": task_states.SPAWNING}
(old_ref, new_ref) = db.instance_update_and_get_original(self.context,
self.instance['uuid'], params)
- print old_ref["task_state"]
- print new_ref["task_state"]
notifications.send_update(self.context, old_ref, new_ref)
self.assertEquals(1, len(test_notifier.NOTIFICATIONS))
diff --git a/nova/tests/test_periodic_tasks.py b/nova/tests/test_periodic_tasks.py
index 3c63f6d4a..621e86b3a 100644
--- a/nova/tests/test_periodic_tasks.py
+++ b/nova/tests/test_periodic_tasks.py
@@ -17,7 +17,6 @@
import time
-import fixtures
from testtools import matchers
from nova import manager
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 68795e22f..ad0e4539c 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -23,7 +23,9 @@ from nova import db
from nova import test
from nova.compute import power_state
+from nova.network import model as network_model
from nova.openstack.common import log as logging
+from nova.tests import fake_network_cache_model
from nova.virt import images
from nova.virt.powervm import blockdev as powervm_blockdev
from nova.virt.powervm import common
@@ -156,8 +158,11 @@ class PowerVMDriverTestCase(test.TestCase):
self.stubs.Set(images, 'fetch_to_raw', fake_image_fetch_to_raw)
image_meta = {}
image_meta['id'] = '666'
+ fake_net_info = network_model.NetworkInfo([
+ fake_network_cache_model.new_vif()])
self.powervm_connection.spawn(context.get_admin_context(),
- self.instance, image_meta, 's3cr3t', [])
+ self.instance, image_meta, [], 's3cr3t',
+ fake_net_info)
state = self.powervm_connection.get_info(self.instance)['state']
self.assertEqual(state, power_state.RUNNING)
@@ -176,12 +181,13 @@ class PowerVMDriverTestCase(test.TestCase):
self.stubs.Set(
self.powervm_connection._powervm, '_cleanup',
lambda *x, **y: raise_(Exception('This should be logged.')))
-
+ fake_net_info = network_model.NetworkInfo([
+ fake_network_cache_model.new_vif()])
self.assertRaises(exception.PowerVMImageCreationFailed,
self.powervm_connection.spawn,
context.get_admin_context(),
self.instance,
- {'id': 'ANY_ID'}, 's3cr3t', [])
+ {'id': 'ANY_ID'}, [], 's3cr3t', fake_net_info)
def test_destroy(self):
self.powervm_connection.destroy(self.instance, None)
diff --git a/nova/tests/test_sqlalchemy.py b/nova/tests/test_sqlalchemy.py
deleted file mode 100644
index f79d607f8..000000000
--- a/nova/tests/test_sqlalchemy.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Unit tests for SQLAlchemy specific code."""
-
-from eventlet import db_pool
-try:
- import MySQLdb
-except ImportError:
- MySQLdb = None
-
-from nova import context
-from nova.db.sqlalchemy import session
-from nova import test
-
-
-class DbPoolTestCase(test.TestCase):
- def setUp(self):
- super(DbPoolTestCase, self).setUp()
- self.flags(sql_dbpool_enable=True)
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
- if not MySQLdb:
- self.skipTest("Unable to test due to lack of MySQLdb")
-
- def test_db_pool_option(self):
- self.flags(sql_idle_timeout=11, sql_min_pool_size=21,
- sql_max_pool_size=42)
-
- info = {}
-
- class FakeConnectionPool(db_pool.ConnectionPool):
- def __init__(self, mod_name, **kwargs):
- info['module'] = mod_name
- info['kwargs'] = kwargs
- super(FakeConnectionPool, self).__init__(mod_name,
- **kwargs)
-
- def connect(self, *args, **kwargs):
- raise test.TestingException()
-
- self.stubs.Set(db_pool, 'ConnectionPool',
- FakeConnectionPool)
-
- sql_connection = 'mysql://user:pass@127.0.0.1/nova'
- self.assertRaises(test.TestingException, session.create_engine,
- sql_connection)
-
- self.assertEqual(info['module'], MySQLdb)
- self.assertEqual(info['kwargs']['max_idle'], 11)
- self.assertEqual(info['kwargs']['min_size'], 21)
- self.assertEqual(info['kwargs']['max_size'], 42)
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 84d56cadf..aaa826a70 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -16,7 +16,9 @@
import __builtin__
import datetime
+import functools
import hashlib
+import importlib
import os
import os.path
import StringIO
@@ -26,10 +28,13 @@ import mox
import nova
from nova import exception
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
from nova import test
from nova import utils
+CONF = cfg.CONF
+
class ByteConversionTest(test.TestCase):
def test_string_conversions(self):
@@ -432,11 +437,6 @@ class GenericUtilsTestCase(test.TestCase):
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root, 'bad')
- def test_strcmp_const_time(self):
- self.assertTrue(utils.strcmp_const_time('abc123', 'abc123'))
- self.assertFalse(utils.strcmp_const_time('a', 'aaaaa'))
- self.assertFalse(utils.strcmp_const_time('ABC123', 'abc123'))
-
def test_temporary_chown(self):
def fake_execute(*args, **kwargs):
if args[0] == 'chown':
@@ -463,6 +463,24 @@ class GenericUtilsTestCase(test.TestCase):
h2 = hashlib.sha1(data).hexdigest()
self.assertEquals(h1, h2)
+ def test_is_valid_boolstr(self):
+ self.assertTrue(utils.is_valid_boolstr('true'))
+ self.assertTrue(utils.is_valid_boolstr('false'))
+ self.assertTrue(utils.is_valid_boolstr('yes'))
+ self.assertTrue(utils.is_valid_boolstr('no'))
+ self.assertTrue(utils.is_valid_boolstr('y'))
+ self.assertTrue(utils.is_valid_boolstr('n'))
+ self.assertTrue(utils.is_valid_boolstr('1'))
+ self.assertTrue(utils.is_valid_boolstr('0'))
+
+ self.assertFalse(utils.is_valid_boolstr('maybe'))
+ self.assertFalse(utils.is_valid_boolstr('only on tuesdays'))
+
+ def test_is_valid_ipv4(self):
+ self.assertTrue(utils.is_valid_ipv4('127.0.0.1'))
+ self.assertFalse(utils.is_valid_ipv4('::1'))
+ self.assertFalse(utils.is_valid_ipv4('bacon'))
+
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
@@ -509,6 +527,29 @@ class MonkeyPatchTestCase(test.TestCase):
in nova.tests.monkey_patch_example.CALLED_FUNCTION)
+class MonkeyPatchDefaultTestCase(test.TestCase):
+ """Unit test for default monkey_patch_modules value."""
+
+ def setUp(self):
+ super(MonkeyPatchDefaultTestCase, self).setUp()
+ self.flags(
+ monkey_patch=True)
+
+ def test_monkey_patch_default_mod(self):
+ # monkey_patch_modules is defined to be
+ # <module_to_patch>:<decorator_to_patch_with>
+ # Here we check that both parts of the default values are
+ # valid
+ for module in CONF.monkey_patch_modules:
+ m = module.split(':', 1)
+ # Check we can import the module to be patched
+ importlib.import_module(m[0])
+ # check the decorator is valid
+ decorator_name = m[1].rsplit('.', 1)
+ decorator_module = importlib.import_module(decorator_name[0])
+ getattr(decorator_module, decorator_name[1])
+
+
class AuditPeriodTest(test.TestCase):
def setUp(self):
@@ -789,3 +830,137 @@ class MetadataToDictTestCase(test.TestCase):
def test_metadata_to_dict_empty(self):
self.assertEqual(utils.metadata_to_dict([]), {})
+
+
+class WrappedCodeTestCase(test.TestCase):
+ """Test the get_wrapped_function utility method."""
+
+ def _wrapper(self, function):
+ @functools.wraps(function)
+ def decorated_function(self, *args, **kwargs):
+ function(self, *args, **kwargs)
+ return decorated_function
+
+ def test_single_wrapped(self):
+ @self._wrapper
+ def wrapped(self, instance, red=None, blue=None):
+ pass
+
+ func = utils.get_wrapped_function(wrapped)
+ func_code = func.func_code
+ self.assertEqual(4, len(func_code.co_varnames))
+ self.assertTrue('self' in func_code.co_varnames)
+ self.assertTrue('instance' in func_code.co_varnames)
+ self.assertTrue('red' in func_code.co_varnames)
+ self.assertTrue('blue' in func_code.co_varnames)
+
+ def test_double_wrapped(self):
+ @self._wrapper
+ @self._wrapper
+ def wrapped(self, instance, red=None, blue=None):
+ pass
+
+ func = utils.get_wrapped_function(wrapped)
+ func_code = func.func_code
+ self.assertEqual(4, len(func_code.co_varnames))
+ self.assertTrue('self' in func_code.co_varnames)
+ self.assertTrue('instance' in func_code.co_varnames)
+ self.assertTrue('red' in func_code.co_varnames)
+ self.assertTrue('blue' in func_code.co_varnames)
+
+ def test_triple_wrapped(self):
+ @self._wrapper
+ @self._wrapper
+ @self._wrapper
+ def wrapped(self, instance, red=None, blue=None):
+ pass
+
+ func = utils.get_wrapped_function(wrapped)
+ func_code = func.func_code
+ self.assertEqual(4, len(func_code.co_varnames))
+ self.assertTrue('self' in func_code.co_varnames)
+ self.assertTrue('instance' in func_code.co_varnames)
+ self.assertTrue('red' in func_code.co_varnames)
+ self.assertTrue('blue' in func_code.co_varnames)
+
+
+class GetCallArgsTestCase(test.TestCase):
+ def _test_func(self, instance, red=None, blue=None):
+ pass
+
+ def test_all_kwargs(self):
+ args = ()
+ kwargs = {'instance': {'uuid': 1}, 'red': 3, 'blue': 4}
+ callargs = utils.getcallargs(self._test_func, *args, **kwargs)
+        # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertTrue('instance' in callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertTrue('red' in callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertTrue('blue' in callargs)
+ self.assertEqual(4, callargs['blue'])
+
+ def test_all_args(self):
+ args = ({'uuid': 1}, 3, 4)
+ kwargs = {}
+ callargs = utils.getcallargs(self._test_func, *args, **kwargs)
+        # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertTrue('instance' in callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertTrue('red' in callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertTrue('blue' in callargs)
+ self.assertEqual(4, callargs['blue'])
+
+ def test_mixed_args(self):
+ args = ({'uuid': 1}, 3)
+ kwargs = {'blue': 4}
+ callargs = utils.getcallargs(self._test_func, *args, **kwargs)
+        # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertTrue('instance' in callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertTrue('red' in callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertTrue('blue' in callargs)
+ self.assertEqual(4, callargs['blue'])
+
+ def test_partial_kwargs(self):
+ args = ()
+ kwargs = {'instance': {'uuid': 1}, 'red': 3}
+ callargs = utils.getcallargs(self._test_func, *args, **kwargs)
+        # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertTrue('instance' in callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertTrue('red' in callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertTrue('blue' in callargs)
+ self.assertEqual(None, callargs['blue'])
+
+ def test_partial_args(self):
+ args = ({'uuid': 1}, 3)
+ kwargs = {}
+ callargs = utils.getcallargs(self._test_func, *args, **kwargs)
+        # implicit self counts as an arg
+ self.assertEqual(4, len(callargs))
+ self.assertTrue('instance' in callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertTrue('red' in callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertTrue('blue' in callargs)
+ self.assertEqual(None, callargs['blue'])
+
+ def test_partial_mixed_args(self):
+ args = (3,)
+ kwargs = {'instance': {'uuid': 1}}
+ callargs = utils.getcallargs(self._test_func, *args, **kwargs)
+ self.assertEqual(4, len(callargs))
+ self.assertTrue('instance' in callargs)
+ self.assertEqual({'uuid': 1}, callargs['instance'])
+ self.assertTrue('red' in callargs)
+ self.assertEqual(3, callargs['red'])
+ self.assertTrue('blue' in callargs)
+ self.assertEqual(None, callargs['blue'])
diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py
index 5568ff0de..780e5ae12 100644
--- a/nova/tests/test_versions.py
+++ b/nova/tests/test_versions.py
@@ -24,38 +24,14 @@ from nova import version
class VersionTestCase(test.TestCase):
"""Test cases for Versions code."""
- def setUp(self):
- """setup test with unchanging values."""
- super(VersionTestCase, self).setUp()
- self.version = version
- self.version.FINAL = False
- self.version.NOVA_VERSION = ['2012', '10']
- self.version.YEAR, self.version.COUNT = self.version.NOVA_VERSION
- self.version.version_info = {'branch_nick': u'LOCALBRANCH',
- 'revision_id': 'LOCALREVISION',
- 'revno': 0}
- self.version.NOVA_PACKAGE = "g9ec3421"
-
- def test_version_string_is_good(self):
- # Ensure version string works.
- self.assertEqual("2012.10-dev", self.version.version_string())
-
- def test_canonical_version_string_is_good(self):
- # Ensure canonical version works.
- self.assertEqual("2012.10", self.version.canonical_version_string())
-
- def test_final_version_strings_are_identical(self):
- # Ensure final version strings match only at release.
- self.assertNotEqual(self.version.canonical_version_string(),
- self.version.version_string())
- self.version.FINAL = True
- self.assertEqual(self.version.canonical_version_string(),
- self.version.version_string())
def test_version_string_with_package_is_good(self):
- # Ensure uninstalled code get version string.
- self.assertEqual("2012.10-g9ec3421",
- self.version.version_string_with_package())
+ """Ensure uninstalled code get version string."""
+
+ self.stubs.Set(version.version_info, 'version', '5.5.5.5')
+ self.stubs.Set(version, 'NOVA_PACKAGE', 'g9ec3421')
+ self.assertEqual("5.5.5.5-g9ec3421",
+ version.version_string_with_package())
def test_release_file(self):
version.loaded = False
diff --git a/nova/tests/test_virt_disk.py b/nova/tests/test_virt_disk.py
index e6a57e085..0c51e8267 100644
--- a/nova/tests/test_virt_disk.py
+++ b/nova/tests/test_virt_disk.py
@@ -67,7 +67,7 @@ class VirtDiskTest(test.TestCase):
"key was injected by Nova\nmysshkey\n",
'gid': 100,
'uid': 100,
- 'mode': 0700})
+ 'mode': 0600})
vfs.teardown()
@@ -101,7 +101,7 @@ class VirtDiskTest(test.TestCase):
"key was injected by Nova\nmysshkey\n",
'gid': 100,
'uid': 100,
- 'mode': 0700})
+ 'mode': 0600})
vfs.teardown()
diff --git a/nova/tests/test_virt_disk_vfs_localfs.py b/nova/tests/test_virt_disk_vfs_localfs.py
index 806ed01d8..096a75964 100644
--- a/nova/tests/test_virt_disk_vfs_localfs.py
+++ b/nova/tests/test_virt_disk_vfs_localfs.py
@@ -46,7 +46,7 @@ def fake_execute(*args, **kwargs):
elif args[0] == "chown":
owner = args[1]
path = args[2]
- if not path in files:
+ if path not in files:
raise Exception("No such file: " + path)
sep = owner.find(':')
@@ -72,7 +72,7 @@ def fake_execute(*args, **kwargs):
elif args[0] == "chgrp":
group = args[1]
path = args[2]
- if not path in files:
+ if path not in files:
raise Exception("No such file: " + path)
if group == "users":
@@ -83,13 +83,13 @@ def fake_execute(*args, **kwargs):
elif args[0] == "chmod":
mode = args[1]
path = args[2]
- if not path in files:
+ if path not in files:
raise Exception("No such file: " + path)
files[path]["mode"] = int(mode, 8)
elif args[0] == "cat":
path = args[1]
- if not path in files:
+ if path not in files:
files[path] = {
"content": "Hello World",
"gid": 100,
@@ -104,8 +104,7 @@ def fake_execute(*args, **kwargs):
else:
path = args[1]
append = False
- print str(files)
- if not path in files:
+ if path not in files:
files[path] = {
"content": "Hello World",
"gid": 100,
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 6ea2d0ef7..a94fdc3c5 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -84,7 +84,7 @@ class _FakeDriverBackendTestCase(object):
'nova.virt.libvirt.driver.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.snapshots.libvirt_utils',
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.firewall.libvirt',
@@ -104,6 +104,13 @@ class _FakeDriverBackendTestCase(object):
def fake_make_drive(_self, _path):
pass
+ def fake_get_instance_disk_info(_self, instance, xml=None):
+ return '[]'
+
+ self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
+ 'get_instance_disk_info',
+ fake_get_instance_disk_info)
+
self.stubs.Set(nova.virt.libvirt.driver.disk,
'extend', fake_extend)
@@ -119,8 +126,6 @@ class _FakeDriverBackendTestCase(object):
def _teardown_fakelibvirt(self):
# Restore libvirt
- import nova.virt.libvirt.driver
- import nova.virt.libvirt.firewall
if self.saved_libvirt:
sys.modules['libvirt'] = self.saved_libvirt
@@ -229,7 +234,8 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
def test_reboot(self):
reboot_type = "SOFT"
instance_ref, network_info = self._get_running_instance()
- self.connection.reboot(instance_ref, network_info, reboot_type)
+ self.connection.reboot(self.ctxt, instance_ref, network_info,
+ reboot_type)
@catch_notimplementederror
def test_get_host_ip_addr(self):
@@ -380,10 +386,10 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
instance_ref, network_info = self._get_running_instance()
self.connection.attach_volume({'driver_volume_type': 'fake'},
instance_ref,
- '/mnt/nova/something')
+ '/dev/sda')
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref,
- '/mnt/nova/something')
+ '/dev/sda')
@catch_notimplementederror
def test_attach_detach_different_power_states(self):
@@ -391,11 +397,11 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.power_off(instance_ref)
self.connection.attach_volume({'driver_volume_type': 'fake'},
instance_ref,
- '/mnt/nova/something')
+ '/dev/sda')
self.connection.power_on(instance_ref)
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref,
- '/mnt/nova/something')
+ '/dev/sda')
@catch_notimplementederror
def test_get_info(self):
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 34f03a555..22544fd2d 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -199,14 +199,16 @@ class VMwareAPIVMTestCase(test.TestCase):
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
- self.conn.reboot(self.instance, self.network_info, reboot_type)
+ self.conn.reboot(self.context, self.instance, self.network_info,
+ reboot_type)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
- self.instance, self.network_info, 'SOFT')
+ self.context, self.instance, self.network_info,
+ 'SOFT')
def test_reboot_not_poweredon(self):
self._create_vm()
@@ -216,7 +218,8 @@ class VMwareAPIVMTestCase(test.TestCase):
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
- self.instance, self.network_info, 'SOFT')
+ self.context, self.instance, self.network_info,
+ 'SOFT')
def test_suspend(self):
self._create_vm()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 067e28a13..cc71ba31e 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -19,7 +19,6 @@
import ast
import base64
import contextlib
-import cPickle as pickle
import functools
import os
import re
@@ -48,6 +47,7 @@ from nova.virt.xenapi import agent
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
+from nova.virt.xenapi.imageupload import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
@@ -431,15 +431,29 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+ image_id = "my_snapshot_id"
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
# Stubbing out firewall driver as previous stub sets alters
# xml rpc result parsing
stubs.stubout_firewall_driver(self.stubs, self.conn)
+
instance = self._create_instance()
- image_id = "my_snapshot_id"
+ self.fake_upload_called = False
+
+ def fake_image_upload(_self, ctx, session, inst, vdi_uuids,
+ img_id):
+ self.fake_upload_called = True
+ self.assertEqual(ctx, self.context)
+ self.assertEqual(inst, instance)
+ self.assertTrue(isinstance(vdi_uuids, list))
+ self.assertEqual(img_id, image_id)
+
+ self.stubs.Set(glance.GlanceStore, 'upload_image',
+ fake_image_upload)
+
self.conn.snapshot(self.context, instance, image_id,
func_call_matcher.call)
@@ -469,6 +483,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
+ self.assertTrue(self.fake_upload_called)
+
def create_vm_record(self, conn, os_type, name):
instances = conn.list_instances()
self.assertEquals(instances, [name])
@@ -579,7 +595,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
- if not vdi_ref in start_list:
+ if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
@@ -839,7 +855,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
host=CONF.host,
vpn=None,
rxtx_factor=3,
- project_id=self.project_id)
+ project_id=self.project_id,
+ macs=None)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
@@ -938,12 +955,12 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def test_reboot_hard(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- conn.reboot(instance, None, "HARD")
+ conn.reboot(self.context, instance, None, "HARD")
def test_reboot_soft(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- conn.reboot(instance, None, "SOFT")
+ conn.reboot(self.context, instance, None, "SOFT")
def test_reboot_halted(self):
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -951,7 +968,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Halted')
- conn.reboot(instance, None, "SOFT")
+ conn.reboot(self.context, instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEquals(vm['power_state'], 'Running')
@@ -960,8 +977,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Unknown')
- self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
- None, "SOFT")
+ self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
+ instance, None, "SOFT")
def _test_maintenance_mode(self, find_host, find_aggregate):
real_call_xenapi = self.conn._session.call_xenapi
@@ -1933,7 +1950,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_rules)
for rule in in_rules:
- if not 'nova' in rule:
+ if 'nova' not in rule:
self.assertTrue(rule in self._out_rules,
'Rule went missing: %s' % rule)
@@ -2574,54 +2591,6 @@ class SwapXapiHostTestCase(test.TestCase):
"http://someserver", 'otherserver'))
-class VmUtilsTestCase(test.TestCase):
- """Unit tests for xenapi utils."""
-
- def test_upload_image(self):
- def fake_instance_system_metadata_get(context, uuid):
- return dict(image_a=1, image_b=2, image_c='c', d='d')
-
- def fake_get_sr_path(session):
- return "foo"
-
- class FakeInstance(dict):
- def __init__(self):
- super(FakeInstance, self).__init__({
- 'auto_disk_config': 'auto disk config',
- 'os_type': 'os type'})
-
- def __missing__(self, item):
- return "whatever"
-
- class FakeSession(object):
- def call_plugin(session_self, service, command, kwargs):
- self.kwargs = kwargs
-
- def call_plugin_serialized(session_self, service, command, *args,
- **kwargs):
- self.kwargs = kwargs
-
- def fake_dumps(thing):
- return thing
-
- self.stubs.Set(db, "instance_system_metadata_get",
- fake_instance_system_metadata_get)
- self.stubs.Set(vm_utils, "get_sr_path", fake_get_sr_path)
- self.stubs.Set(pickle, "dumps", fake_dumps)
-
- ctx = context.get_admin_context()
-
- instance = FakeInstance()
- session = FakeSession()
- vm_utils.upload_image(ctx, session, instance, "vmi uuids", "image id")
-
- actual = self.kwargs['properties']
- # Inheritance happens in another place, now
- expected = dict(auto_disk_config='auto disk config',
- os_type='os type')
- self.assertEquals(expected, actual)
-
-
class XenAPILiveMigrateTestCase(stubs.XenAPITestBase):
"""Unit tests for live_migration."""
def setUp(self):
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 6437f9537..a5d4b4712 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -18,6 +18,9 @@ import platform
import nova.context
import nova.db
+
+from nova.compute import instance_types
+from nova import exception
from nova.image import glance
from nova.network import minidns
from nova.network import model as network_model
@@ -52,25 +55,35 @@ def get_test_instance_type(context=None):
'root_gb': 40,
'ephemeral_gb': 80,
'swap': 1024}
-
- instance_type_ref = nova.db.instance_type_create(context,
- test_instance_type)
+ try:
+ instance_type_ref = nova.db.instance_type_create(context,
+ test_instance_type)
+ except exception.InstanceTypeExists:
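+        # a previous call in this test run already created 'kinda.big';
+        # reuse the existing row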
+ instance_type_ref = nova.db.instance_type_get_by_name(context,
+ 'kinda.big')
return instance_type_ref
-def get_test_instance(context=None):
+def get_test_instance(context=None, instance_type=None):
if not context:
context = get_test_admin_context()
- test_instance = {'memory_kb': '1024000',
+ if not instance_type:
+ instance_type = get_test_instance_type(context)
+
+ metadata = {}
+ instance_types.save_instance_type_info(metadata, instance_type, '')
+
+ test_instance = {'memory_kb': '2048000',
'basepath': '/some/path',
'bridge_name': 'br100',
- 'vcpus': 2,
- 'root_gb': 10,
+ 'vcpus': 4,
+ 'root_gb': 40,
'project_id': 'fake',
'bridge': 'br101',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'instance_type_id': '5'} # m1.small
+ 'instance_type_id': '5',
+ 'system_metadata': metadata}
instance_ref = nova.db.instance_create(context, test_instance)
return instance_ref
diff --git a/nova/tests/virt/xenapi/imageupload/__init__.py b/nova/tests/virt/xenapi/imageupload/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/tests/virt/xenapi/imageupload/__init__.py
diff --git a/nova/tests/virt/xenapi/imageupload/test_glance.py b/nova/tests/virt/xenapi/imageupload/test_glance.py
new file mode 100644
index 000000000..b0518228d
--- /dev/null
+++ b/nova/tests/virt/xenapi/imageupload/test_glance.py
@@ -0,0 +1,74 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mox
+
+from nova import context
+from nova import test
+from nova.virt.xenapi.imageupload import glance
+from nova.virt.xenapi import vm_utils
+
+
+class TestGlanceStore(test.TestCase):
+ def setUp(self):
+ super(TestGlanceStore, self).setUp()
+ self.store = glance.GlanceStore()
+ self.mox = mox.Mox()
+
+ def tearDown(self):
+ super(TestGlanceStore, self).tearDown()
+
+ def test_upload_image(self):
+ glance_host = '0.1.2.3'
+ glance_port = 8143
+ glance_use_ssl = False
+ sr_path = '/fake/sr/path'
+ self.flags(glance_host=glance_host)
+ self.flags(glance_port=glance_port)
+ self.flags(glance_api_insecure=glance_use_ssl)
+
+ def fake_get_sr_path(*_args, **_kwargs):
+ return sr_path
+
+ self.stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
+
+ ctx = context.RequestContext('user', 'project', auth_token='foobar')
+ properties = {
+ 'auto_disk_config': True,
+ 'os_type': 'default',
+ }
+ image_id = 'fake_image_uuid'
+ vdi_uuids = ['fake_vdi_uuid']
+ instance = {'uuid': 'blah'}
+ instance.update(properties)
+
+ params = {'vdi_uuids': vdi_uuids,
+ 'image_id': image_id,
+ 'glance_host': glance_host,
+ 'glance_port': glance_port,
+ 'glance_use_ssl': glance_use_ssl,
+ 'sr_path': sr_path,
+ 'auth_token': 'foobar',
+ 'properties': properties}
+ session = self.mox.CreateMockAnything()
+ session.call_plugin_serialized('glance', 'upload_vhd', **params)
+ self.mox.ReplayAll()
+
+ self.store.upload_image(ctx, session, instance, vdi_uuids, image_id)
+
+ self.mox.VerifyAll()
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
index 275088af0..633e6f835 100644
--- a/nova/tests/virt/xenapi/test_vm_utils.py
+++ b/nova/tests/virt/xenapi/test_vm_utils.py
@@ -19,10 +19,8 @@
import contextlib
import fixtures
import mox
-import uuid
from nova import test
-from nova.tests.xenapi import stubs
from nova import utils
from nova.virt.xenapi import vm_utils
diff --git a/nova/tests/xenapi/test_vm_utils.py b/nova/tests/xenapi/test_vm_utils.py
index 6d7f9a624..a701efdd9 100644
--- a/nova/tests/xenapi/test_vm_utils.py
+++ b/nova/tests/xenapi/test_vm_utils.py
@@ -14,7 +14,7 @@ XENSM_TYPE = 'xensm'
ISCSI_TYPE = 'iscsi'
-def get_fake_dev_params(sr_type):
+def get_fake_connection_data(sr_type):
fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
'name_label': 'fake_storage',
'name_description': 'test purposes',
@@ -73,16 +73,16 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
self.assertEquals([], result)
- def test_get_vdis_for_boot_from_vol_with_sr_uuid(self):
- dev_params = get_fake_dev_params(XENSM_TYPE)
+ def test_get_vdi_uuid_for_volume_with_sr_uuid(self):
+ connection_data = get_fake_connection_data(XENSM_TYPE)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
- result = vm_utils.get_vdis_for_boot_from_vol(driver._session,
- dev_params)
- self.assertEquals(result['root']['uuid'], 'falseVDI')
+ vdi_uuid = vm_utils.get_vdi_uuid_for_volume(
+ driver._session, connection_data)
+ self.assertEquals(vdi_uuid, 'falseVDI')
- def test_get_vdis_for_boot_from_vol_failure(self):
+ def test_get_vdi_uuid_for_volume_failure(self):
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
@@ -90,19 +90,19 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
return None
self.stubs.Set(volume_utils, 'introduce_sr', bad_introduce_sr)
- dev_params = get_fake_dev_params(XENSM_TYPE)
+ connection_data = get_fake_connection_data(XENSM_TYPE)
self.assertRaises(exception.NovaException,
- vm_utils.get_vdis_for_boot_from_vol,
- driver._session, dev_params)
+ vm_utils.get_vdi_uuid_for_volume,
+ driver._session, connection_data)
- def test_get_vdis_for_boot_from_iscsi_vol_missing_sr_uuid(self):
- dev_params = get_fake_dev_params(ISCSI_TYPE)
+ def test_get_vdi_uuid_for_volume_from_iscsi_vol_missing_sr_uuid(self):
+ connection_data = get_fake_connection_data(ISCSI_TYPE)
stubs.stubout_session(self.stubs, fake.SessionBase)
driver = xenapi_conn.XenAPIDriver(False)
- result = vm_utils.get_vdis_for_boot_from_vol(driver._session,
- dev_params)
- self.assertNotEquals(result['root']['uuid'], None)
+ vdi_uuid = vm_utils.get_vdi_uuid_for_volume(
+ driver._session, connection_data)
+ self.assertNotEquals(vdi_uuid, None)
class VMRefOrRaiseVMFoundTestCase(test.TestCase):
diff --git a/nova/utils.py b/nova/utils.py
index 52d4868c9..7ad810504 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -48,16 +48,19 @@ from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
+from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
+notify_decorator = 'nova.openstack.common.notifier.api.notify_decorator'
+
monkey_patch_opts = [
cfg.BoolOpt('monkey_patch',
default=False,
help='Whether to log monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[
- 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator',
- 'nova.compute.api:nova.notifier.api.notify_decorator'
+ 'nova.api.ec2.cloud:%s' % (notify_decorator),
+ 'nova.compute.api:%s' % (notify_decorator)
],
help='List of modules/decorators to monkey patch'),
]
@@ -628,7 +631,7 @@ class DynamicLoopingCall(LoopingCallBase):
if not self._running:
break
- if not periodic_interval_max is None:
+ if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Periodic task processor sleeping for %.02f '
'seconds'), idle)
@@ -876,27 +879,17 @@ def is_int_like(val):
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not."""
- val = str(val).lower()
- return val == 'true' or val == 'false' or \
- val == 'yes' or val == 'no' or \
- val == 'y' or val == 'n' or \
- val == '1' or val == '0'
+ boolstrs = ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0')
+ return str(val).lower() in boolstrs
def is_valid_ipv4(address):
- """valid the address strictly as per format xxx.xxx.xxx.xxx.
- where xxx is a value between 0 and 255.
- """
- parts = address.split(".")
- if len(parts) != 4:
+ """Verify that address represents a valid IPv4 address."""
+ try:
+ addr = netaddr.IPAddress(address)
+ return addr.version == 4
+ except Exception:
return False
- for item in parts:
- try:
- if not 0 <= int(item) <= 255:
- return False
- except ValueError:
- return False
- return True
def is_valid_cidr(address):
@@ -930,10 +923,11 @@ def monkey_patch():
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
- Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator'
+ Example:
+ 'nova.api.ec2.cloud:nova.openstack.common.notifier.api.notify_decorator'
Parameters of the decorator is as follows.
- (See nova.notifier.api.notify_decorator)
+ (See nova.openstack.common.notifier.api.notify_decorator)
name - name of the function
function - object of the function
@@ -1087,21 +1081,42 @@ def temporary_mutation(obj, **kwargs):
with temporary_mutation(context, read_deleted="yes"):
do_something_that_needed_deleted_objects()
"""
+ def is_dict_like(thing):
+ return hasattr(thing, 'has_key')
+
+ def get(thing, attr, default):
+ if is_dict_like(thing):
+ return thing.get(attr, default)
+ else:
+ return getattr(thing, attr, default)
+
+ def set_value(thing, attr, val):
+ if is_dict_like(thing):
+ thing[attr] = val
+ else:
+ setattr(thing, attr, val)
+
+ def delete(thing, attr):
+ if is_dict_like(thing):
+ del thing[attr]
+ else:
+ delattr(thing, attr)
+
NOT_PRESENT = object()
old_values = {}
for attr, new_value in kwargs.items():
- old_values[attr] = getattr(obj, attr, NOT_PRESENT)
- setattr(obj, attr, new_value)
+ old_values[attr] = get(obj, attr, NOT_PRESENT)
+ set_value(obj, attr, new_value)
try:
yield
finally:
for attr, old_value in old_values.items():
if old_value is NOT_PRESENT:
- del obj[attr]
+ delete(obj, attr)
else:
- setattr(obj, attr, old_value)
+ set_value(obj, attr, old_value)
def generate_mac_address():
@@ -1161,26 +1176,6 @@ def tempdir(**kwargs):
LOG.error(_('Could not remove tmpdir: %s'), str(e))
-def strcmp_const_time(s1, s2):
- """Constant-time string comparison.
-
- :params s1: the first string
- :params s2: the second string
-
- :return: True if the strings are equal.
-
- This function takes two strings and compares them. It is intended to be
- used when doing a comparison for authentication purposes to help guard
- against timing attacks.
- """
- if len(s1) != len(s2):
- return False
- result = 0
- for (a, b) in zip(s1, s2):
- result |= ord(a) ^ ord(b)
- return result == 0
-
-
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
@@ -1270,5 +1265,79 @@ def last_bytes(file_like_object, num):
def metadata_to_dict(metadata):
result = {}
for item in metadata:
- result[item['key']] = item['value']
+ if not item.get('deleted'):
+ result[item['key']] = item['value']
return result
+
+
+def get_wrapped_function(function):
+ """Get the method at the bottom of a stack of decorators."""
+ if not hasattr(function, 'func_closure') or not function.func_closure:
+ return function
+
+ def _get_wrapped_function(function):
+ if not hasattr(function, 'func_closure') or not function.func_closure:
+ return None
+
+ for closure in function.func_closure:
+ func = closure.cell_contents
+
+ deeper_func = _get_wrapped_function(func)
+ if deeper_func:
+ return deeper_func
+ elif hasattr(closure.cell_contents, '__call__'):
+ return closure.cell_contents
+
+ return _get_wrapped_function(function)
+
+
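A sketch of what get_wrapped_function recovers, assuming CPython 2 closure attributes (func_closure), which the implementation relies on:

    import functools
    from nova import utils

    def noisy(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            return f(*args, **kwargs)
        return wrapper

    @noisy
    @noisy
    def target():
        pass

    # Recurses through the closure cells of both wrapper layers and
    # returns the original, undecorated function object:
    original = utils.get_wrapped_function(target)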
+def getcallargs(function, *args, **kwargs):
+ """This is a simplified inspect.getcallargs (2.7+).
+
+ It should be replaced when python >= 2.7 is standard.
+ """
+ keyed_args = {}
+ argnames, varargs, keywords, defaults = inspect.getargspec(function)
+
+ keyed_args.update(kwargs)
+
+ #NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
+ # argnames but not in args or kwargs. Uses 'in' rather than '==' because
+ # some tests use 'self2'.
+ if 'self' in argnames[0] or 'cls' == argnames[0]:
+ # The function may not actually be a method or have im_self.
+ # Typically seen when it's stubbed with mox.
+ if inspect.ismethod(function) and hasattr(function, 'im_self'):
+ keyed_args[argnames[0]] = function.im_self
+ else:
+ keyed_args[argnames[0]] = None
+
+ remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
+ keyed_args.update(dict(zip(remaining_argnames, args)))
+
+ if defaults:
+ num_defaults = len(defaults)
+ for argname, value in zip(argnames[-num_defaults:], defaults):
+ if argname not in keyed_args:
+ keyed_args[argname] = value
+
+ return keyed_args
+
+
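Usage sketch for the simplified getcallargs (the example function is hypothetical):

    from nova import utils

    def example(self, a, b=2):
        pass

    utils.getcallargs(example, 1)
    # -> {'self': None, 'a': 1, 'b': 2}
    # 'self' maps to None because example is not a bound method here.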
+class ExceptionHelper(object):
+ """Class to wrap another and translate the ClientExceptions raised by its
+ function calls to the actual ones"""
+
+ def __init__(self, target):
+ self._target = target
+
+ def __getattr__(self, name):
+ func = getattr(self._target, name)
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except rpc_common.ClientException, e:
+ raise e._exc_info[1], None, e._exc_info[2]
+ return wrapper
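A hedged usage sketch for ExceptionHelper: wrap an object whose methods may raise rpc_common.ClientException so callers see the original exception instead (compute_rpcapi, ctxt and get_instance are illustrative names, not part of this change):

    from nova import utils

    safe_api = utils.ExceptionHelper(compute_rpcapi)
    # A remote InstanceNotFound now propagates as InstanceNotFound
    # rather than as a wrapping ClientException:
    safe_api.get_instance(ctxt, 'instance-uuid')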
diff --git a/nova/version.py b/nova/version.py
index 82f3bb970..722a20e16 100644
--- a/nova/version.py
+++ b/nova/version.py
@@ -14,14 +14,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova.openstack.common import version as common_version
+
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
NOVA_PACKAGE = None # OS distro package version suffix
-NOVA_VERSION = ['2013', '1', None]
-YEAR, COUNT, REVISION = NOVA_VERSION
-FINAL = False # This becomes true at Release Candidate time
loaded = False
+version_info = common_version.VersionInfo('nova')
+version_string = version_info.version_string
def _load_config():
@@ -81,19 +82,8 @@ def package_string():
return NOVA_PACKAGE
-def canonical_version_string():
- return '.'.join(filter(None, NOVA_VERSION))
-
-
-def version_string():
- if FINAL:
- return canonical_version_string()
- else:
- return '%s-dev' % (canonical_version_string(),)
-
-
def version_string_with_package():
if package_string() is None:
- return canonical_version_string()
+ return version_info.version_string()
else:
- return "%s-%s" % (canonical_version_string(), package_string())
+ return "%s-%s" % (version_info.version_string(), package_string())
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
index 34bcd1229..198c06256 100644
--- a/nova/virt/baremetal/db/sqlalchemy/api.py
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -351,7 +351,7 @@ def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
try:
session.add(bm_interface)
session.flush()
- except exception.DBError, e:
+ except db_session.DBError, e:
# TODO(deva): clean up when db layer raises DuplicateKeyError
if str(e).find('IntegrityError') != -1:
raise exception.NovaException(_("Baremetal interface %s "
diff --git a/nova/virt/baremetal/db/sqlalchemy/session.py b/nova/virt/baremetal/db/sqlalchemy/session.py
index fcaf210a5..06d777354 100644
--- a/nova/virt/baremetal/db/sqlalchemy/session.py
+++ b/nova/virt/baremetal/db/sqlalchemy/session.py
@@ -19,8 +19,8 @@
"""Session Handling for SQLAlchemy backend."""
-from nova.db.sqlalchemy import session as nova_session
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session as nova_session
from nova import paths
opts = [
@@ -38,11 +38,13 @@ CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
-CONF.import_opt('sqlite_db', 'nova.db.sqlalchemy.session')
+CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
_ENGINE = None
_MAKER = None
+DBError = nova_session.DBError
+
def get_session(autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy session."""
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 9904fdcd4..9160485a6 100644..100755
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -25,6 +25,7 @@ from nova.compute import power_state
from nova import context as nova_context
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import paths
@@ -140,7 +141,7 @@ class BareMetalDriver(driver.ComputeDriver):
keyval[0] = keyval[0].strip()
keyval[1] = keyval[1].strip()
extra_specs[keyval[0]] = keyval[1]
- if not 'cpu_arch' in extra_specs:
+ if 'cpu_arch' not in extra_specs:
LOG.warning(
_('cpu_arch is not found in instance_type_extra_specs'))
extra_specs['cpu_arch'] = ''
@@ -266,11 +267,11 @@ class BareMetalDriver(driver.ComputeDriver):
pm.state = baremetal_states.ERROR
try:
_update_state(context, node, instance, pm.state)
- except exception.DBError, e:
+ except db_session.DBError, e:
LOG.warning(_("Failed to update state record for "
"baremetal node %s") % instance['uuid'])
- def reboot(self, instance, network_info, reboot_type,
+ def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
ctx = nova_context.get_admin_context()
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 11af99d41..9a8a4fed0 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -25,13 +25,12 @@ import os
from nova.compute import instance_types
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
-from nova import utils
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
-from nova.virt.disk import api as disk
pxe_opts = [
@@ -168,11 +167,9 @@ def get_pxe_config_file_path(instance):
def get_partition_sizes(instance):
- type_id = instance['instance_type_id']
- root_mb = instance['root_gb'] * 1024
-
- # NOTE(deva): is there a way to get swap_mb directly from instance?
- swap_mb = instance_types.get_instance_type(type_id)['swap']
+ instance_type = instance_types.extract_instance_type(instance)
+ root_mb = instance_type['root_gb'] * 1024
+ swap_mb = instance_type['swap']
# NOTE(deva): For simpler code paths on the deployment side,
# we always create a swap partition. If the flavor
@@ -413,7 +410,7 @@ class PXE(base.NodeDriver):
bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
try:
macs = self._collect_mac_addresses(context, node)
- except exception.DBError:
+ except db_session.DBError:
pass
else:
for mac in macs:
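A sketch of the simplified flow: extract_instance_type reads the flavor data already stored on the instance, so get_partition_sizes no longer needs a separate instance_types lookup (the flavor values below are illustrative):

    # For an instance whose flavor has root_gb=10 and swap=512:
    instance_type = instance_types.extract_instance_type(instance)
    instance_type['root_gb'] * 1024   # -> 10240 (root_mb)
    instance_type['swap']             # -> 512 (swap_mb)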
diff --git a/nova/virt/baremetal/virtual_power_driver.py b/nova/virt/baremetal/virtual_power_driver.py
new file mode 100644
index 000000000..22dd3a954
--- /dev/null
+++ b/nova/virt/baremetal/virtual_power_driver.py
@@ -0,0 +1,219 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Virtual power driver
+
+
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import importutils
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.baremetal import baremetal_states
+from nova.virt.baremetal import base
+import nova.virt.powervm.common as connection
+
+opts = [
+ cfg.StrOpt('virtual_power_ssh_host',
+ default='',
+ help='IP or name of the virtual power host'),
+ cfg.StrOpt('virtual_power_type',
+ default='vbox',
+ help='base command to use for virtual power (vbox, virsh)'),
+ cfg.StrOpt('virtual_power_host_user',
+ default='',
+ help='user to execute virtual power commands as'),
+ cfg.StrOpt('virtual_power_host_pass',
+ default='',
+ help='password for virtual power host_user'),
+]
+
+baremetal_vp = cfg.OptGroup(name='baremetal',
+ title='Baremetal Options')
+
+CONF = cfg.CONF
+CONF.register_group(baremetal_vp)
+CONF.register_opts(opts, baremetal_vp)
+
+_conn = None
+_virtual_power_settings = None
+_vp_cmd = None
+_cmds = None
+
+LOG = logging.getLogger(__name__)
+
+
+class VirtualPowerManager(base.PowerManager):
+ """Virtual Power Driver for Baremetal Nova Compute
+
+ This PowerManager class provides a mechanism for controlling the power
+ state of VMs based on their name and MAC address. It uses ssh to connect
+ to the VM's host and issue commands.
+
+ Nodes are matched based on MAC address.
+
+ NOTE: for use in dev/test environments only!
+
+ """
+ def __init__(self, **kwargs):
+ global _conn
+ global _virtual_power_settings
+ global _cmds
+
+ if _cmds is None:
+ LOG.debug("Setting up %s commands." %
+ CONF.baremetal.virtual_power_type)
+ _vpc = 'nova.virt.baremetal.virtual_power_driver_settings.%s' % \
+ CONF.baremetal.virtual_power_type
+ _cmds = importutils.import_class(_vpc)
+ self._vp_cmd = _cmds()
+ self.connection_data = _conn
+ node = kwargs.pop('node', {})
+ instance = kwargs.pop('instance', {})
+ self._node_name = instance.get('hostname', "")
+ self._mac_address = node.get('prov_mac_address', "")
+ self._mac_address = self._mac_address.replace(':', '')
+ self._connection = None
+ self._matched_name = ''
+ self.state = None
+
+ def _get_conn(self):
+ if not CONF.baremetal.virtual_power_ssh_host:
+ raise exception.NovaException(
+ _('virtual_power_ssh_host not defined. Cannot start'))
+
+ if not CONF.baremetal.virtual_power_host_user:
+ raise exception.NovaException(
+ _('virtual_power_host_user not defined. Cannot start'))
+
+ if not CONF.baremetal.virtual_power_host_pass:
+ raise exception.NovaException(
+ _('virtual_power_host_pass not defined. Cannot start'))
+
+ _conn = connection.Connection(
+ CONF.baremetal.virtual_power_ssh_host,
+ CONF.baremetal.virtual_power_host_user,
+ CONF.baremetal.virtual_power_host_pass)
+ return _conn
+
+ def _set_connection(self):
+ if self._connection is None:
+ if self.connection_data is None:
+ self.connection_data = self._get_conn()
+
+ self._connection = connection.ssh_connect(self.connection_data)
+
+ def _get_full_node_list(self):
+ LOG.debug("Getting full node list.")
+ cmd = self._vp_cmd.list_cmd
+ full_list = self._run_command(cmd)
+ return full_list
+
+ def _check_for_node(self):
+ LOG.debug("Looking up Name for Mac address %s." % self._mac_address)
+ self._matched_name = ''
+ full_node_list = self._get_full_node_list()
+
+ for node in full_node_list:
+ cmd = self._vp_cmd.get_node_macs.replace('{_NodeName_}', node)
+ mac_address_list = self._run_command(cmd)
+
+ for mac in mac_address_list:
+ if self._mac_address.lower() in mac.lower():
+ self._matched_name = node
+ break
+ if self._matched_name:
+ break
+ return self._matched_name
+
+ def activate_node(self):
+ LOG.info("activate_node name %s" % self._node_name)
+ if self._check_for_node():
+ cmd = self._vp_cmd.start_cmd
+ self._run_command(cmd)
+
+ if self.is_power_on():
+ self.state = baremetal_states.ACTIVE
+ else:
+ self.state = baremetal_states.ERROR
+ return self.state
+
+ def reboot_node(self):
+ LOG.info("reset node: %s" % self._node_name)
+ if self._check_for_node():
+ cmd = self._vp_cmd.reboot_cmd
+ self._run_command(cmd)
+ if self.is_power_on():
+ self.state = baremetal_states.ACTIVE
+ else:
+ self.state = baremetal_states.ERROR
+ return self.state
+
+ def deactivate_node(self):
+ LOG.info("deactivate_node name %s" % self._node_name)
+ if self._check_for_node():
+ if self.is_power_on():
+ cmd = self._vp_cmd.stop_cmd
+ self._run_command(cmd)
+
+ if self.is_power_on():
+ self.state = baremetal_states.ERROR
+ else:
+ self.state = baremetal_states.DELETED
+ return self.state
+
+ def is_power_on(self):
+ LOG.debug("Checking if %s is running" % self._node_name)
+
+ if not self._check_for_node():
+ return False
+
+ cmd = self._vp_cmd.list_running_cmd
+ running_node_list = self._run_command(cmd)
+
+ for node in running_node_list:
+ if self._matched_name in node:
+ return True
+ return False
+
+ def start_console(self):
+ pass
+
+ def stop_console(self):
+ pass
+
+ def _run_command(self, cmd, check_exit_code=True):
+ """Run a remote command using an active ssh connection.
+
+ :param cmd: String with the command to run.
+
+ If {_NodeName_} is in the command it will get replaced by
+ the _matched_name value.
+
+ base_cmd will also get prepended to the command.
+ """
+ self._set_connection()
+
+ cmd = cmd.replace('{_NodeName_}', self._matched_name)
+
+ cmd = '%s %s' % (self._vp_cmd.base_cmd, cmd)
+
+ stdout, stderr = utils.ssh_execute(self._connection, cmd,
+ check_exit_code=check_exit_code)
+ result = stdout.strip().splitlines()
+ LOG.debug('Result for run_command: %s' % result)
+ return result
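A usage sketch for the new virtual power driver (dev/test only, per its docstring); the node/instance dicts are illustrative and the CONF.baremetal.virtual_power_* options must be set first:

    from nova.virt.baremetal import virtual_power_driver as vpd

    pm = vpd.VirtualPowerManager(
        node={'prov_mac_address': 'aa:bb:cc:dd:ee:ff'},
        instance={'hostname': 'test-node'})
    pm.activate_node()        # matches the VM by MAC, then runs start_cmd
    pm.is_power_on()          # True if the matched VM is listed as running
    pm.deactivate_node()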
diff --git a/nova/virt/baremetal/virtual_power_driver_settings.py b/nova/virt/baremetal/virtual_power_driver_settings.py
new file mode 100644
index 000000000..6f6b28cd6
--- /dev/null
+++ b/nova/virt/baremetal/virtual_power_driver_settings.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# Virtual power driver commands
+
+from nova.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+class vbox(object):
+ """set commands for basic Virtual Box control."""
+
+ def __init__(self):
+ self.base_cmd = '/usr/bin/VBoxManage'
+ self.start_cmd = 'startvm {_NodeName_}'
+ self.stop_cmd = 'controlvm {_NodeName_} poweroff'
+ self.reboot_cmd = 'controlvm {_NodeName_} reset'
+ self.list_cmd = "list vms|awk -F'\"' '{print $2}'"
+ self.list_running_cmd = 'list runningvms'
+ self.get_node_macs = ("showvminfo --machinereadable {_NodeName_} | "
+ "grep "
+ '"macaddress" | awk -F '
+ "'"
+ '"'
+ "' '{print $2}'")
+
+
+class virsh(object):
+ """set commands for basic Virsh control."""
+
+ def __init__(self):
+ self.base_cmd = '/usr/bin/virsh'
+ self.start_cmd = 'start {_NodeName_}'
+ self.stop_cmd = 'destroy {_NodeName_}'
+ self.reboot_cmd = 'reset {_NodeName_}'
+ self.list_cmd = "list --all | tail -n +2 | awk -F\" \" '{print $2}'"
+ self.list_running_cmd = \
+ "list --all|grep running|awk -F\" \" '{print $2}'"
+ self.get_node_macs = ("dumpxml {_NodeName_} | grep "
+ '"mac address" | awk -F'
+ '"'
+ "'"
+ '" '
+ "'{print $2}' | tr -d ':'")
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index 0a05dfedd..e92325b97 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -210,7 +210,7 @@ class LibvirtVolumeDriver(VolumeDriver):
def _volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
- if not driver_type in self.volume_drivers:
+ if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 3d7d0f516..d4f8b6cc0 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -258,10 +258,16 @@ class _DiskImage(object):
return bool(self._mounter)
def umount(self):
- """Unmount a disk image from the file system."""
+ """Umount a mount point from the filesystem."""
+ if self._mounter:
+ self._mounter.do_umount()
+ self._mounter = None
+
+ def teardown(self):
+ """Remove a disk image from the file system."""
try:
if self._mounter:
- self._mounter.do_umount()
+ self._mounter.do_teardown()
self._mounter = None
finally:
if self._mkdir:
@@ -336,9 +342,22 @@ def teardown_container(container_dir):
"""
try:
img = _DiskImage(image=None, mount_dir=container_dir)
+ img.teardown()
+ except Exception, exn:
+ LOG.exception(_('Failed to teardown container filesystem: %s'), exn)
+
+
+def clean_lxc_namespace(container_dir):
+ """Clean up the container namespace rootfs mounting one spawned.
+
+ It will umount the mounted names that is mounted
+ but leave the linked deivces alone.
+ """
+ try:
+ img = _DiskImage(image=None, mount_dir=container_dir)
img.umount()
except Exception, exn:
- LOG.exception(_('Failed to unmount container filesystem: %s'), exn)
+ LOG.exception(_('Failed to umount container filesystem: %s'), exn)
def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
@@ -443,6 +462,7 @@ def _inject_key_into_fs(key, fs):
])
_inject_file_into_fs(fs, keyfile, key_data, append=True)
+ fs.set_permissions(keyfile, 0600)
_setup_selinux_for_keys(fs, sshdir)
diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py
index 4de9d9c77..1d9d1fc20 100644
--- a/nova/virt/disk/mount/api.py
+++ b/nova/virt/disk/mount/api.py
@@ -218,6 +218,11 @@ class Mount(object):
"""Call the unmnt, unmap and unget operations."""
if self.mounted:
self.unmnt_dev()
+
+ def do_teardown(self):
+ """Call the umnt, unmap, and unget operations."""
+ if self.mounted:
+ self.unmnt_dev()
if self.mapped:
self.unmap_dev()
if self.linked:
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 747b60714..ba0dfbafe 100644..100755
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -239,7 +239,7 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def reboot(self, instance, network_info, reboot_type,
+ def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot the specified instance.
@@ -254,7 +254,6 @@ class ComputeDriver(object):
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
"""
- # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_pool_info(self, console_type):
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 04eeded72..30a5fc758 100644..100755
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -125,11 +125,11 @@ class FakeDriver(driver.ComputeDriver):
self.instances[name] = fake_instance
def snapshot(self, context, instance, name, update_task_state):
- if not instance['name'] in self.instances:
+ if instance['name'] not in self.instances:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
update_task_state(task_state=task_states.IMAGE_UPLOADING)
- def reboot(self, instance, network_info, reboot_type,
+ def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
pass
@@ -209,7 +209,7 @@ class FakeDriver(driver.ComputeDriver):
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach the disk to the instance at mountpoint using info."""
instance_name = instance['name']
- if not instance_name in self._mounts:
+ if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = connection_info
return True
diff --git a/nova/virt/hyperv/__init__.py b/nova/virt/hyperv/__init__.py
index e69de29bb..090fc0639 100644
--- a/nova/virt/hyperv/__init__.py
+++ b/nova/virt/hyperv/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/hyperv/baseops.py b/nova/virt/hyperv/baseops.py
deleted file mode 100644
index 5b617f898..000000000
--- a/nova/virt/hyperv/baseops.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Cloudbase Solutions Srl
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Management base class for Hyper-V operations.
-"""
-import sys
-
-from nova.openstack.common import log as logging
-
-# Check needed for unit testing on Unix
-if sys.platform == 'win32':
- import wmi
-
-LOG = logging.getLogger(__name__)
-
-
-class BaseOps(object):
- def __init__(self):
- self.__conn = None
- self.__conn_v2 = None
- self.__conn_cimv2 = None
- self.__conn_wmi = None
- self.__conn_storage = None
-
- @property
- def _conn(self):
- if self.__conn is None:
- self.__conn = wmi.WMI(moniker='//./root/virtualization')
- return self.__conn
-
- @property
- def _conn_v2(self):
- if self.__conn_v2 is None:
- self.__conn_v2 = wmi.WMI(moniker='//./root/virtualization/v2')
- return self.__conn_v2
-
- @property
- def _conn_cimv2(self):
- if self.__conn_cimv2 is None:
- self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
- return self.__conn_cimv2
-
- @property
- def _conn_wmi(self):
- if self.__conn_wmi is None:
- self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
- return self.__conn_wmi
-
- @property
- def _conn_storage(self):
- if self.__conn_storage is None:
- storage_namespace = '//./Root/Microsoft/Windows/Storage'
- self.__conn_storage = wmi.WMI(moniker=storage_namespace)
- return self.__conn_storage
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index 2352c3bef..5b515a0f1 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Pedro Navarro Perez
+# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -20,17 +21,18 @@ Helper methods for operations related to the management of volumes,
and storage repositories
"""
+import abc
import sys
+if sys.platform == 'win32':
+ import _winreg
+ import wmi
+
from nova import block_device
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
-# Check needed for unit testing on Unix
-if sys.platform == 'win32':
- import _winreg
-
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
@@ -38,25 +40,41 @@ CONF.import_opt('my_ip', 'nova.netconf')
class BaseVolumeUtils(object):
- def get_iscsi_initiator(self, cim_conn):
+ def __init__(self):
+ if sys.platform == 'win32':
+ self._conn_wmi = wmi.WMI(moniker='//./root/wmi')
+ self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+
+ @abc.abstractmethod
+ def login_storage_target(self, target_lun, target_iqn, target_portal):
+ pass
+
+ @abc.abstractmethod
+ def logout_storage_target(self, target_iqn):
+ pass
+
+ @abc.abstractmethod
+ def execute_log_out(self, session_id):
+ pass
+
+ def get_iscsi_initiator(self):
"""Get iscsi initiator name for this machine."""
- computer_system = cim_conn.Win32_ComputerSystem()[0]
+ computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
hostname = computer_system.name
- keypath = \
- r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\iSCSI\Discovery"
+ keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\"
+ "iSCSI\\Discovery")
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
- _winreg.KEY_ALL_ACCESS)
+ _winreg.KEY_ALL_ACCESS)
temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
initiator_name = str(temp[0])
_winreg.CloseKey(key)
except Exception:
LOG.info(_("The ISCSI initiator name can't be found. "
- "Choosing the default one"))
- computer_system = cim_conn.Win32_ComputerSystem()[0]
- initiator_name = "iqn.1991-05.com.microsoft:" + \
- hostname.lower()
+ "Choosing the default one"))
+ computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
+ initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
return {
'ip': CONF.my_ip,
'initiator': initiator_name,
@@ -78,3 +96,33 @@ class BaseVolumeUtils(object):
LOG.debug(_("block_device_list %s"), block_device_list)
return block_device.strip_dev(mount_device) in block_device_list
+
+ def _get_drive_number_from_disk_path(self, disk_path):
+ # TODO(pnavarro) replace with regex
+ start_device_id = disk_path.find('"', disk_path.find('DeviceID'))
+ end_device_id = disk_path.find('"', start_device_id + 1)
+ device_id = disk_path[start_device_id + 1:end_device_id]
+ return device_id[device_id.find("\\") + 2:]
+
+ def get_session_id_from_mounted_disk(self, physical_drive_path):
+ drive_number = self._get_drive_number_from_disk_path(
+ physical_drive_path)
+ initiator_sessions = self._conn_wmi.query("SELECT * FROM "
+ "MSiSCSIInitiator_Session"
+ "Class")
+ for initiator_session in initiator_sessions:
+ devices = initiator_session.Devices
+ for device in devices:
+ device_number = str(device.DeviceNumber)
+ if device_number == drive_number:
+ return initiator_session.SessionId
+
+ def get_device_number_for_target(self, target_iqn, target_lun):
+ initiator_session = self._conn_wmi.query("SELECT * FROM "
+ "MSiSCSIInitiator_Session"
+ "Class WHERE TargetName='%s'"
+ % target_iqn)[0]
+ devices = initiator_session.Devices
+ for device in devices:
+ if device.ScsiLun == target_lun:
+ return device.DeviceNumber
diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py
index 4be569e88..31323f0f4 100644
--- a/nova/virt/hyperv/constants.py
+++ b/nova/virt/hyperv/constants.py
@@ -35,15 +35,6 @@ HYPERV_POWER_STATE = {
HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
}
-REQ_POWER_STATE = {
- 'Enabled': HYPERV_VM_STATE_ENABLED,
- 'Disabled': HYPERV_VM_STATE_DISABLED,
- 'Reboot': HYPERV_VM_STATE_REBOOT,
- 'Reset': HYPERV_VM_STATE_RESET,
- 'Paused': HYPERV_VM_STATE_PAUSED,
- 'Suspended': HYPERV_VM_STATE_SUSPENDED,
-}
-
WMI_WIN32_PROCESSOR_ARCHITECTURE = {
0: 'x86',
1: 'MIPS',
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 9316b2598..4af3b8b05 100644..100755
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -16,49 +16,7 @@
# under the License.
"""
-A connection to Hyper-V .
-Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V
-Hyper-V WMI usage:
- http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx
-The Hyper-V object model briefly:
- The physical computer and its hosted virtual machines are each represented
- by the Msvm_ComputerSystem class.
-
- Each virtual machine is associated with a
- Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more
- Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting
- there is a series of Msvm_ResourceAllocationSettingData (rasd) objects.
- The rasd objects describe the settings for each device in a VM.
- Together, the vs_gs_data, vmsettings and rasds describe the configuration
- of the virtual machine.
-
- Creating new resources such as disks and nics involves cloning a default
- rasd object and appropriately modifying the clone and calling the
- AddVirtualSystemResources WMI method
- Changing resources such as memory uses the ModifyVirtualSystemResources
- WMI method
-
-Using the Python WMI library:
- Tutorial:
- http://timgolden.me.uk/python/wmi/tutorial.html
- Hyper-V WMI objects can be retrieved simply by using the class name
- of the WMI object and optionally specifying a column to filter the
- result set. More complex filters can be formed using WQL (sql-like)
- queries.
- The parameters and return tuples of WMI method calls can gleaned by
- examining the doc string. For example:
- >>> vs_man_svc.ModifyVirtualSystemResources.__doc__
- ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[])
- => (Job, ReturnValue)'
- When passing setting data (ResourceSettingData) to the WMI method,
- an XML representation of the data is passed in using GetText_(1).
- Available methods on a service can be determined using method.keys():
- >>> vs_man_svc.methods.keys()
- vmsettings and rasds for a vm can be retrieved using the 'associators'
- method with the appropriate return class.
- Long running WMI commands generally return a Job (an instance of
- Msvm_ConcreteJob) whose state can be polled to determine when it finishes
-
+A Hyper-V Nova Compute driver.
"""
from nova.openstack.common import log as logging
@@ -84,7 +42,7 @@ class HyperVDriver(driver.ComputeDriver):
self._volumeops)
def init_host(self, host):
- self._host = host
+ pass
def list_instances(self):
return self._vmops.list_instances()
@@ -92,9 +50,9 @@ class HyperVDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
self._vmops.spawn(context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info)
+ admin_password, network_info, block_device_info)
- def reboot(self, instance, network_info, reboot_type,
+ def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
self._vmops.reboot(instance, network_info, reboot_type)
@@ -106,16 +64,12 @@ class HyperVDriver(driver.ComputeDriver):
return self._vmops.get_info(instance)
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
- instance['name'],
- mountpoint)
+ instance['name'])
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
- instance['name'],
- mountpoint)
+ instance['name'])
def get_volume_connector(self, instance):
return self._volumeops.get_volume_connector(instance)
@@ -151,30 +105,38 @@ class HyperVDriver(driver.ComputeDriver):
self._vmops.power_on(instance)
def live_migration(self, context, instance_ref, dest, post_method,
- recover_method, block_migration=False, migrate_data=None):
+ recover_method, block_migration=False,
+ migrate_data=None):
self._livemigrationops.live_migration(context, instance_ref, dest,
- post_method, recover_method, block_migration, migrate_data)
+ post_method, recover_method,
+ block_migration, migrate_data)
def compare_cpu(self, cpu_info):
return self._livemigrationops.compare_cpu(cpu_info)
def pre_live_migration(self, context, instance, block_device_info,
- network_info, migrate_data=None):
+ network_info, migrate_data=None):
self._livemigrationops.pre_live_migration(context, instance,
- block_device_info, network_info)
+ block_device_info,
+ network_info)
def post_live_migration_at_destination(self, ctxt, instance_ref,
- network_info, block_migration, block_device_info=None):
+ network_info,
+ block_migr=False,
+ block_device_info=None):
self._livemigrationops.post_live_migration_at_destination(ctxt,
- instance_ref, network_info, block_migration)
-
- def check_can_live_migrate_destination(self, ctxt, instance,
- src_compute_info, dst_compute_info,
- block_migration, disk_over_commit):
+ instance_ref,
+ network_info,
+ block_migr)
+
+ def check_can_live_migrate_destination(self, ctxt, instance_ref,
+ src_compute_info, dst_compute_info,
+ block_migration=False,
+ disk_over_commit=False):
pass
def check_can_live_migrate_destination_cleanup(self, ctxt,
- dest_check_data):
+ dest_check_data):
pass
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
@@ -188,25 +150,21 @@ class HyperVDriver(driver.ComputeDriver):
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
LOG.debug(_("ensure_filtering_rules_for_instance called"),
- instance=instance_ref)
+ instance=instance_ref)
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance."""
LOG.debug(_("unfilter_instance called"), instance=instance)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM."""
LOG.debug(_("confirm_migration called"), instance=instance)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance."""
LOG.debug(_("finish_revert_migration called"), instance=instance)
def finish_migration(self, context, migration, instance, disk_info,
- network_info, image_meta, resize_instance=False,
- block_device_info=None):
- """Completes a resize, turning on the migrated instance."""
+ network_info, image_meta, resize_instance=False,
+ block_device_info=None):
LOG.debug(_("finish_migration called"), instance=instance)
def get_console_output(self, instance):
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index 5cbe46c1c..5a22b60de 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -18,25 +18,23 @@
"""
Management class for host operations.
"""
-import ctypes
-import multiprocessing
import os
import platform
-from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
-from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import pathutils
-CONF = cfg.CONF
LOG = logging.getLogger(__name__)
-class HostOps(baseops.BaseOps):
+class HostOps(object):
def __init__(self):
- super(HostOps, self).__init__()
self._stats = None
+ self._hostutils = hostutils.HostUtils()
+ self._pathutils = pathutils.PathUtils()
def _get_cpu_info(self):
"""Get the CPU information.
@@ -44,94 +42,51 @@ class HostOps(baseops.BaseOps):
of the central processor in the hypervisor.
"""
cpu_info = dict()
- processor = self._conn_cimv2.query(
- "SELECT * FROM Win32_Processor WHERE ProcessorType = 3")
- cpu_info['arch'] = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE\
- .get(processor[0].Architecture, 'Unknown')
- cpu_info['model'] = processor[0].Name
- cpu_info['vendor'] = processor[0].Manufacturer
+ processors = self._hostutils.get_cpus_info()
+
+ w32_arch_dict = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE
+ cpu_info['arch'] = w32_arch_dict.get(processors[0]['Architecture'],
+ 'Unknown')
+ cpu_info['model'] = processors[0]['Name']
+ cpu_info['vendor'] = processors[0]['Manufacturer']
topology = dict()
- topology['sockets'] = len(processor)
- topology['cores'] = processor[0].NumberOfCores
- topology['threads'] = processor[0].NumberOfLogicalProcessors\
- / processor[0].NumberOfCores
+ topology['sockets'] = len(processors)
+ topology['cores'] = processors[0]['NumberOfCores']
+ topology['threads'] = (processors[0]['NumberOfLogicalProcessors'] /
+ processors[0]['NumberOfCores'])
cpu_info['topology'] = topology
features = list()
for fkey, fname in constants.PROCESSOR_FEATURE.items():
- if ctypes.windll.kernel32.IsProcessorFeaturePresent(fkey):
+ if self._hostutils.is_cpu_feature_present(fkey):
features.append(fname)
cpu_info['features'] = features
- return jsonutils.dumps(cpu_info)
+ return cpu_info
- def _get_vcpu_total(self):
- """Get vcpu number of physical computer.
- :returns: the number of cpu core.
- """
- # On certain platforms, this will raise a NotImplementedError.
- try:
- return multiprocessing.cpu_count()
- except NotImplementedError:
- LOG.warn(_("Cannot get the number of cpu, because this "
- "function is not implemented for this platform. "
- "This error can be safely ignored for now."))
- return 0
-
- def _get_memory_mb_total(self):
- """Get the total memory size(MB) of physical computer.
- :returns: the total amount of memory(MB).
- """
- total_kb = self._conn_cimv2.query(
- "SELECT TotalVisibleMemorySize FROM win32_operatingsystem")[0]\
- .TotalVisibleMemorySize
- total_mb = long(total_kb) / 1024
- return total_mb
+ def _get_memory_info(self):
+ (total_mem_kb, free_mem_kb) = self._hostutils.get_memory_info()
+ total_mem_mb = total_mem_kb / 1024
+ free_mem_mb = free_mem_kb / 1024
+ return (total_mem_mb, free_mem_mb, total_mem_mb - free_mem_mb)
def _get_local_hdd_info_gb(self):
- """Get the total and used size of the volume containing
- CONF.instances_path expressed in GB.
- :returns:
- A tuple with the total and used space in GB.
- """
- normalized_path = os.path.normpath(CONF.instances_path)
- drive, path = os.path.splitdrive(normalized_path)
- hdd_info = self._conn_cimv2.query(
- ("SELECT FreeSpace,Size FROM win32_logicaldisk WHERE DeviceID='%s'"
- ) % drive)[0]
- total_gb = long(hdd_info.Size) / (1024 ** 3)
- free_gb = long(hdd_info.FreeSpace) / (1024 ** 3)
- used_gb = total_gb - free_gb
- return total_gb, used_gb
+ (drive, _) = os.path.splitdrive(self._pathutils.get_instances_path())
+ (size, free_space) = self._hostutils.get_volume_info(drive)
- def _get_vcpu_used(self):
- """Get vcpu usage number of physical computer.
- :returns: The total number of vcpu that currently used.
- """
- #TODO(jordanrinke) figure out a way to count assigned VCPUs
- total_vcpu = 0
- return total_vcpu
-
- def _get_memory_mb_used(self):
- """Get the free memory size(MB) of physical computer.
- :returns: the total usage of memory(MB).
- """
- total_kb = self._conn_cimv2.query(
- "SELECT FreePhysicalMemory FROM win32_operatingsystem")[0]\
- .FreePhysicalMemory
- total_mb = long(total_kb) / 1024
-
- return total_mb
+ total_gb = size / (1024 ** 3)
+ free_gb = free_space / (1024 ** 3)
+ used_gb = total_gb - free_gb
+ return (total_gb, free_gb, used_gb)
def _get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
- version = self._conn_cimv2.Win32_OperatingSystem()[0]\
- .Version.replace('.', '')
- LOG.info(_('Windows version: %s ') % version)
+ version = self._hostutils.get_windows_version().replace('.', '')
+ LOG.debug(_('Windows version: %s ') % version)
return version
def get_available_resource(self):
@@ -143,36 +98,53 @@ class HostOps(baseops.BaseOps):
:returns: dictionary describing resources
"""
- LOG.info(_('get_available_resource called'))
-
- local_gb, used_gb = self._get_local_hdd_info_gb()
- dic = {'vcpus': self._get_vcpu_total(),
- 'memory_mb': self._get_memory_mb_total(),
- 'local_gb': local_gb,
- 'vcpus_used': self._get_vcpu_used(),
- 'memory_mb_used': self._get_memory_mb_used(),
- 'local_gb_used': used_gb,
+ LOG.debug(_('get_available_resource called'))
+
+ (total_mem_mb,
+ free_mem_mb,
+ used_mem_mb) = self._get_memory_info()
+
+ (total_hdd_gb,
+ free_hdd_gb,
+ used_hdd_gb) = self._get_local_hdd_info_gb()
+
+ cpu_info = self._get_cpu_info()
+ cpu_topology = cpu_info['topology']
+ vcpus = (cpu_topology['sockets'] *
+ cpu_topology['cores'] *
+ cpu_topology['threads'])
+
+ dic = {'vcpus': vcpus,
+ 'memory_mb': total_mem_mb,
+ 'memory_mb_used': used_mem_mb,
+ 'local_gb': total_hdd_gb,
+ 'local_gb_used': used_hdd_gb,
'hypervisor_type': "hyperv",
'hypervisor_version': self._get_hypervisor_version(),
'hypervisor_hostname': platform.node(),
- 'cpu_info': self._get_cpu_info()}
+ 'vcpus_used': 0,
+ 'cpu_info': jsonutils.dumps(cpu_info)}
return dic
def _update_stats(self):
LOG.debug(_("Updating host stats"))
+ (total_mem_mb, free_mem_mb, used_mem_mb) = self._get_memory_info()
+ (total_hdd_gb,
+ free_hdd_gb,
+ used_hdd_gb) = self._get_local_hdd_info_gb()
+
data = {}
- data["disk_total"], data["disk_used"] = self._get_local_hdd_info_gb()
- data["disk_available"] = data["disk_total"] - data["disk_used"]
- data["host_memory_total"] = self._get_memory_mb_total()
- data["host_memory_overhead"] = self._get_memory_mb_used()
- data["host_memory_free"] = \
- data["host_memory_total"] - data["host_memory_overhead"]
- data["host_memory_free_computed"] = data["host_memory_free"]
- data["supported_instances"] = \
- [('i686', 'hyperv', 'hvm'),
- ('x86_64', 'hyperv', 'hvm')]
+ data["disk_total"] = total_hdd_gb
+ data["disk_used"] = used_hdd_gb
+ data["disk_available"] = free_hdd_gb
+ data["host_memory_total"] = total_mem_mb
+ data["host_memory_overhead"] = used_mem_mb
+ data["host_memory_free"] = free_mem_mb
+ data["host_memory_free_computed"] = free_mem_mb
+ data["supported_instances"] = [('i686', 'hyperv', 'hvm'),
+ ('x86_64', 'hyperv', 'hvm')]
data["hypervisor_hostname"] = platform.node()
self._stats = data
diff --git a/nova/virt/hyperv/hostutils.py b/nova/virt/hyperv/hostutils.py
new file mode 100644
index 000000000..71f3bc5b2
--- /dev/null
+++ b/nova/virt/hyperv/hostutils.py
@@ -0,0 +1,74 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ctypes
+import sys
+
+if sys.platform == 'win32':
+ import wmi
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class HostUtils(object):
+ def __init__(self):
+ if sys.platform == 'win32':
+ self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+
+ def get_cpus_info(self):
+ cpus = self._conn_cimv2.query("SELECT * FROM Win32_Processor "
+ "WHERE ProcessorType = 3")
+ cpus_list = []
+ for cpu in cpus:
+ cpu_info = {'Architecture': cpu.Architecture,
+ 'Name': cpu.Name,
+ 'Manufacturer': cpu.Manufacturer,
+ 'NumberOfCores': cpu.NumberOfCores,
+ 'NumberOfLogicalProcessors':
+ cpu.NumberOfLogicalProcessors}
+ cpus_list.append(cpu_info)
+ return cpus_list
+
+ def is_cpu_feature_present(self, feature_key):
+ return ctypes.windll.kernel32.IsProcessorFeaturePresent(feature_key)
+
+ def get_memory_info(self):
+ """
+ Returns a tuple with total visible memory and free physical memory
+ expressed in kB.
+ """
+ mem_info = self._conn_cimv2.query("SELECT TotalVisibleMemorySize, "
+ "FreePhysicalMemory "
+ "FROM win32_operatingsystem")[0]
+ return (long(mem_info.TotalVisibleMemorySize),
+ long(mem_info.FreePhysicalMemory))
+
+ def get_volume_info(self, drive):
+ """
+ Returns a tuple with total size and free space
+ expressed in bytes.
+ """
+ logical_disk = self._conn_cimv2.query("SELECT Size, FreeSpace "
+ "FROM win32_logicaldisk "
+ "WHERE DeviceID='%s'"
+ % drive)[0]
+ return (long(logical_disk.Size), long(logical_disk.FreeSpace))
+
+ def get_windows_version(self):
+ return self._conn_cimv2.Win32_OperatingSystem()[0].Version
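Usage sketch for HostUtils (win32 only, since the WMI connection is created only on that platform); the drive letter is illustrative:

    from nova.virt.hyperv import hostutils

    hu = hostutils.HostUtils()
    (total_kb, free_kb) = hu.get_memory_info()   # kB, per the docstring
    (size_b, free_b) = hu.get_volume_info('C:')  # bytes
    sockets = len(hu.get_cpus_info())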
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 232cbd660..8ee3005f1 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -19,144 +19,66 @@
Management class for live migration VM operations.
"""
import os
-import sys
-from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-from nova.virt.hyperv import baseops
-from nova.virt.hyperv import constants
+from nova.virt.hyperv import livemigrationutils
+from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vmutils
-
-# Check needed for unit testing on Unix
-if sys.platform == 'win32':
- import wmi
+from nova.virt import images
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
-class LiveMigrationOps(baseops.BaseOps):
+class LiveMigrationOps(object):
def __init__(self, volumeops):
- super(LiveMigrationOps, self).__init__()
+ self._pathutils = pathutils.PathUtils()
self._vmutils = vmutils.VMUtils()
+ self._livemigrutils = livemigrationutils.LiveMigrationUtils()
self._volumeops = volumeops
- def _check_live_migration_config(self):
- try:
- self._conn_v2
- except Exception:
- raise vmutils.HyperVException(
- _('Live migration is not supported " \
- "by this version of Hyper-V'))
-
- migration_svc = self._conn_v2.Msvm_VirtualSystemMigrationService()[0]
- vsmssd = migration_svc.associators(
- wmi_association_class='Msvm_ElementSettingData',
- wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')[0]
- if not vsmssd.EnableVirtualSystemMigration:
- raise vmutils.HyperVException(
- _('Live migration is not enabled on this host'))
- if not migration_svc.MigrationServiceListenerIPAddressList:
- raise vmutils.HyperVException(
- _('Live migration networks are not configured on this host'))
-
def live_migration(self, context, instance_ref, dest, post_method,
- recover_method, block_migration=False, migrate_data=None):
+ recover_method, block_migration=False,
+ migrate_data=None):
LOG.debug(_("live_migration called"), instance=instance_ref)
instance_name = instance_ref["name"]
try:
- self._check_live_migration_config()
-
- vm_name = self._vmutils.lookup(self._conn, instance_name)
- if vm_name is None:
- raise exception.InstanceNotFound(instance=instance_name)
- vm = self._conn_v2.Msvm_ComputerSystem(
- ElementName=instance_name)[0]
- vm_settings = vm.associators(
- wmi_association_class='Msvm_SettingsDefineState',
- wmi_result_class='Msvm_VirtualSystemSettingData')[0]
-
- new_resource_setting_data = []
- sasds = vm_settings.associators(
- wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
- wmi_result_class='Msvm_StorageAllocationSettingData')
- for sasd in sasds:
- if sasd.ResourceType == 31 and \
- sasd.ResourceSubType == \
- "Microsoft:Hyper-V:Virtual Hard Disk":
- #sasd.PoolId = ""
- new_resource_setting_data.append(sasd.GetText_(1))
-
- LOG.debug(_("Getting live migration networks for remote "
- "host: %s"), dest)
- _conn_v2_remote = wmi.WMI(
- moniker='//' + dest + '/root/virtualization/v2')
- migration_svc_remote = \
- _conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
- remote_ip_address_list = \
- migration_svc_remote.MigrationServiceListenerIPAddressList
-
- # VirtualSystemAndStorage
- vsmsd = self._conn_v2.query("select * from "
- "Msvm_VirtualSystemMigrationSettingData "
- "where MigrationType = 32771")[0]
- vsmsd.DestinationIPAddressList = remote_ip_address_list
- migration_setting_data = vsmsd.GetText_(1)
-
- migration_svc =\
- self._conn_v2.Msvm_VirtualSystemMigrationService()[0]
-
- LOG.debug(_("Starting live migration for instance: %s"),
- instance_name)
- (job_path, ret_val) = migration_svc.MigrateVirtualSystemToHost(
- ComputerSystem=vm.path_(),
- DestinationHost=dest,
- MigrationSettingData=migration_setting_data,
- NewResourceSettingData=new_resource_setting_data)
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job_path)
- else:
- success = (ret_val == 0)
- if not success:
- raise vmutils.HyperVException(
- _('Failed to live migrate VM %s') % instance_name)
+ self._livemigrutils.live_migrate_vm(instance_name, dest)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug(_("Calling live migration recover_method "
- "for instance: %s"), instance_name)
+ "for instance: %s"), instance_name)
recover_method(context, instance_ref, dest, block_migration)
LOG.debug(_("Calling live migration post_method for instance: %s"),
- instance_name)
+ instance_name)
post_method(context, instance_ref, dest, block_migration)
def pre_live_migration(self, context, instance, block_device_info,
- network_info):
+ network_info):
LOG.debug(_("pre_live_migration called"), instance=instance)
- self._check_live_migration_config()
+ self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
ebs_root = self._volumeops.volume_in_mapping(
self._volumeops.get_default_root_device(),
block_device_info)
if not ebs_root:
- base_vhd_path = self._vmutils.get_base_vhd_path(
+ base_vhd_path = self._pathutils.get_base_vhd_path(
instance["image_ref"])
if not os.path.exists(base_vhd_path):
- self._vmutils.fetch_image(base_vhd_path, context,
- instance["image_ref"],
- instance["user_id"],
- instance["project_id"])
+ images.fetch(context, instance["image_ref"], base_vhd_path,
+ instance["user_id"], instance["project_id"])
def post_live_migration_at_destination(self, ctxt, instance_ref,
- network_info, block_migration):
+ network_info, block_migration):
LOG.debug(_("post_live_migration_at_destination called"),
- instance=instance_ref)
+ instance=instance_ref)
def compare_cpu(self, cpu_info):
LOG.debug(_("compare_cpu called %s"), cpu_info)
diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py
new file mode 100644
index 000000000..6af4f0fa5
--- /dev/null
+++ b/nova/virt/hyperv/livemigrationutils.py
@@ -0,0 +1,115 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+if sys.platform == 'win32':
+ import wmi
+
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import vmutils
+
+LOG = logging.getLogger(__name__)
+
+
+class LiveMigrationUtils(object):
+
+ def __init__(self):
+ self._vmutils = vmutils.VMUtils()
+
+ def _get_conn_v2(self, host='localhost'):
+ try:
+ return wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
+ except wmi.x_wmi as ex:
+ LOG.exception(ex)
+ if ex.com_error.hresult == -2147217394:
+ msg = (_('Live migration is not supported on target host "%s"')
+ % host)
+ elif ex.com_error.hresult == -2147023174:
+ msg = (_('Target live migration host "%s" is unreachable')
+ % host)
+ else:
+ msg = _('Live migration failed: %s') % ex.message
+ raise vmutils.HyperVException(msg)
+
+ def check_live_migration_config(self):
+ conn_v2 = self._get_conn_v2()
+ migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]
+ vsmssds = migration_svc.associators(
+ wmi_association_class='Msvm_ElementSettingData',
+ wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')
+ vsmssd = vsmssds[0]
+ if not vsmssd.EnableVirtualSystemMigration:
+ raise vmutils.HyperVException(
+ _('Live migration is not enabled on this host'))
+ if not migration_svc.MigrationServiceListenerIPAddressList:
+ raise vmutils.HyperVException(
+ _('Live migration networks are not configured on this host'))
+
+ def _get_vm(self, conn_v2, vm_name):
+ vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
+ n = len(vms)
+ if not n:
+ raise vmutils.HyperVException(_('VM not found: %s') % vm_name)
+ elif n > 1:
+ raise vmutils.HyperVException(_('Duplicate VM name found: %s')
+ % vm_name)
+ return vms[0]
+
+ def live_migrate_vm(self, vm_name, dest_host):
+ self.check_live_migration_config()
+
+ # We need a v2 namespace VM object
+ conn_v2_local = self._get_conn_v2()
+
+ vm = self._get_vm(conn_v2_local, vm_name)
+ vm_settings = vm.associators(
+ wmi_association_class='Msvm_SettingsDefineState',
+ wmi_result_class='Msvm_VirtualSystemSettingData')[0]
+
+ new_resource_setting_data = []
+ sasds = vm_settings.associators(
+ wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
+ wmi_result_class='Msvm_StorageAllocationSettingData')
+ for sasd in sasds:
+ if (sasd.ResourceType == 31 and sasd.ResourceSubType ==
+ "Microsoft:Hyper-V:Virtual Hard Disk"):
+ #sasd.PoolId = ""
+ new_resource_setting_data.append(sasd.GetText_(1))
+
+ LOG.debug(_("Getting live migration networks for remote host: %s"),
+ dest_host)
+ conn_v2_remote = self._get_conn_v2(dest_host)
+ migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
+ rmt_ip_addr_list = migr_svc_rmt.MigrationServiceListenerIPAddressList
+
+ # VirtualSystemAndStorage
+ vsmsd = conn_v2_local.query("select * from "
+ "Msvm_VirtualSystemMigrationSettingData "
+ "where MigrationType = 32771")[0]
+ vsmsd.DestinationIPAddressList = rmt_ip_addr_list
+ migration_setting_data = vsmsd.GetText_(1)
+
+ migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
+
+ LOG.debug(_("Starting live migration for VM: %s"), vm_name)
+ (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
+ ComputerSystem=vm.path_(),
+ DestinationHost=dest_host,
+ MigrationSettingData=migration_setting_data,
+ NewResourceSettingData=new_resource_setting_data)
+ self._vmutils.check_ret_val(ret_val, job_path)
diff --git a/nova/virt/hyperv/networkutils.py b/nova/virt/hyperv/networkutils.py
new file mode 100644
index 000000000..4e1f68685
--- /dev/null
+++ b/nova/virt/hyperv/networkutils.py
@@ -0,0 +1,62 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility class for network-related operations.
+"""
+
+import sys
+import uuid
+
+if sys.platform == 'win32':
+ import wmi
+
+from nova.virt.hyperv import vmutils
+
+
+class NetworkUtils(object):
+ def __init__(self):
+ if sys.platform == 'win32':
+ self._conn = wmi.WMI(moniker='//./root/virtualization')
+
+ def get_external_vswitch(self, vswitch_name):
+ if vswitch_name:
+ vswitches = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name)
+ else:
+ # Find the vswitch that is connected to the first physical nic.
+ ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]
+ port = ext_port.associators(wmi_result_class='Msvm_SwitchPort')[0]
+ vswitches = port.associators(wmi_result_class='Msvm_VirtualSwitch')
+
+ if not len(vswitches):
+ raise vmutils.HyperVException(_('vswitch "%s" not found')
+ % vswitch_name)
+ return vswitches[0].path_()
+
+ def create_vswitch_port(self, vswitch_path, port_name):
+ switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
+ #Create a port on the vswitch.
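+        #The port Name must be unique, hence a generated UUID is used;
+        #FriendlyName is the human readable label associated with the port.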
+ (new_port, ret_val) = switch_svc.CreateSwitchPort(
+ Name=str(uuid.uuid4()),
+ FriendlyName=port_name,
+ ScopeOfResidence="",
+ VirtualSwitch=vswitch_path)
+ if ret_val != 0:
+ raise vmutils.HyperVException(_("Failed to create vswitch port "
+ "%(port_name)s on switch "
+ "%(vswitch_path)s") % locals())
+ return new_port
diff --git a/nova/virt/hyperv/pathutils.py b/nova/virt/hyperv/pathutils.py
new file mode 100644
index 000000000..7bc2e7ac2
--- /dev/null
+++ b/nova/virt/hyperv/pathutils.py
@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('instances_path', 'nova.compute.manager')
+
+
+class PathUtils(object):
+ def open(self, path, mode):
+ """Wrapper on __builin__.open used to simplify unit testing."""
+ import __builtin__
+ return __builtin__.open(path, mode)
+
+ def get_instances_path(self):
+ return os.path.normpath(CONF.instances_path)
+
+ def get_instance_path(self, instance_name):
+ instance_path = os.path.join(self.get_instances_path(), instance_name)
+ if not os.path.exists(instance_path):
+ LOG.debug(_('Creating folder %s '), instance_path)
+ os.makedirs(instance_path)
+ return instance_path
+
+ def get_vhd_path(self, instance_name):
+ instance_path = self.get_instance_path(instance_name)
+ return os.path.join(instance_path, instance_name + ".vhd")
+
+ def get_base_vhd_path(self, image_name):
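+        # Cached base images are kept in a '_base' directory under
+        # instances_path, similar to the image cache layout used by the
+        # libvirt driver.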
+ base_dir = os.path.join(self.get_instances_path(), '_base')
+ if not os.path.exists(base_dir):
+ os.makedirs(base_dir)
+ return os.path.join(base_dir, image_name + ".vhd")
+
+ def make_export_path(self, instance_name):
+ export_folder = os.path.join(self.get_instances_path(), "export",
+ instance_name)
+ if os.path.isdir(export_folder):
+ LOG.debug(_('Removing existing folder %s '), export_folder)
+ shutil.rmtree(export_folder)
+ LOG.debug(_('Creating folder %s '), export_folder)
+ os.makedirs(export_folder)
+ return export_folder
+
+ def vhd_exists(self, path):
+ return os.path.exists(path)
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index cdc6e45a4..c43f59b70 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -20,173 +20,97 @@ Management class for VM snapshot operations.
"""
import os
import shutil
-import sys
from nova.compute import task_states
-from nova import exception
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-from nova.virt.hyperv import baseops
-from nova.virt.hyperv import constants
-from nova.virt.hyperv import ioutils
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
-from xml.etree import ElementTree
-
-# Check needed for unit testing on Unix
-if sys.platform == 'win32':
- import wmi
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
-class SnapshotOps(baseops.BaseOps):
+class SnapshotOps(object):
def __init__(self):
- super(SnapshotOps, self).__init__()
+ self._pathutils = pathutils.PathUtils()
self._vmutils = vmutils.VMUtils()
+ self._vhdutils = vhdutils.VHDUtils()
+
+ def _save_glance_image(self, context, name, image_vhd_path):
+ (glance_image_service,
+ image_id) = glance.get_remote_image_service(context, name)
+ image_metadata = {"is_public": False,
+ "disk_format": "vhd",
+ "container_format": "bare",
+ "properties": {}}
+ with self._pathutils.open(image_vhd_path, 'rb') as f:
+ glance_image_service.update(context, image_id, image_metadata, f)
def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
instance_name = instance["name"]
- vm = self._vmutils.lookup(self._conn, instance_name)
- if vm is None:
- raise exception.InstanceNotFound(instance=instance_name)
- vm = self._conn.Msvm_ComputerSystem(ElementName=instance_name)[0]
- vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug(_("Creating snapshot for instance %s"), instance_name)
- (job_path, ret_val, snap_setting_data) = \
- vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job_path)
- if success:
- job_wmi_path = job_path.replace('\\', '/')
- job = wmi.WMI(moniker=job_wmi_path)
- snap_setting_data = job.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')[0]
- else:
- success = (ret_val == 0)
- if not success:
- raise vmutils.HyperVException(
- _('Failed to create snapshot for VM %s') %
- instance_name)
- else:
- update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
+ snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
export_folder = None
- f = None
try:
- src_vhd_path = os.path.join(CONF.instances_path, instance_name,
- instance_name + ".vhd")
-
- image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+ src_vhd_path = self._pathutils.get_vhd_path(instance_name)
LOG.debug(_("Getting info for VHD %s"), src_vhd_path)
- (src_vhd_info, job_path, ret_val) = \
- image_man_svc.GetVirtualHardDiskInfo(src_vhd_path)
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job_path)
- else:
- success = (ret_val == 0)
- if not success:
- raise vmutils.HyperVException(
- _("Failed to get info for disk %s") %
- (src_vhd_path))
-            src_base_disk_path = None
-            et = ElementTree.fromstring(src_vhd_info)
-            for item in et.findall("PROPERTY"):
-                if item.attrib["NAME"] == "ParentPath":
-                    src_base_disk_path = item.find("VALUE").text
-                    break
-
+            src_base_disk_path = self._vhdutils.get_vhd_parent_path(
+                src_vhd_path)
- export_folder = self._vmutils.make_export_path(instance_name)
+ export_folder = self._pathutils.make_export_path(instance_name)
dest_vhd_path = os.path.join(export_folder, os.path.basename(
src_vhd_path))
LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'),
- locals())
+ locals())
shutil.copyfile(src_vhd_path, dest_vhd_path)
image_vhd_path = None
if not src_base_disk_path:
image_vhd_path = dest_vhd_path
else:
- dest_base_disk_path = os.path.join(export_folder,
- os.path.basename(src_base_disk_path))
+ basename = os.path.basename(src_base_disk_path)
+ dest_base_disk_path = os.path.join(export_folder, basename)
LOG.debug(_('Copying base disk %(src_vhd_path)s to '
- '%(dest_base_disk_path)s'), locals())
+ '%(dest_base_disk_path)s'), locals())
shutil.copyfile(src_base_disk_path, dest_base_disk_path)
LOG.debug(_("Reconnecting copied base VHD "
- "%(dest_base_disk_path)s and diff VHD %(dest_vhd_path)s"),
- locals())
- (job_path, ret_val) = \
- image_man_svc.ReconnectParentVirtualHardDisk(
- ChildPath=dest_vhd_path,
- ParentPath=dest_base_disk_path,
- Force=True)
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job_path)
- else:
- success = (ret_val == 0)
- if not success:
- raise vmutils.HyperVException(
- _("Failed to reconnect base disk "
- "%(dest_base_disk_path)s and diff disk "
- "%(dest_vhd_path)s") %
- locals())
+ "%(dest_base_disk_path)s and diff "
+ "VHD %(dest_vhd_path)s"), locals())
+ self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
+ dest_base_disk_path)
LOG.debug(_("Merging base disk %(dest_base_disk_path)s and "
- "diff disk %(dest_vhd_path)s"),
- locals())
- (job_path, ret_val) = image_man_svc.MergeVirtualHardDisk(
- SourcePath=dest_vhd_path,
- DestinationPath=dest_base_disk_path)
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job_path)
- else:
- success = (ret_val == 0)
- if not success:
- raise vmutils.HyperVException(
- _("Failed to merge base disk %(dest_base_disk_path)s "
- "and diff disk %(dest_vhd_path)s") %
- locals())
+ "diff disk %(dest_vhd_path)s"), locals())
+ self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
image_vhd_path = dest_base_disk_path
- (glance_image_service, image_id) = \
- glance.get_remote_image_service(context, name)
- image_metadata = {"is_public": False,
- "disk_format": "vhd",
- "container_format": "bare",
- "properties": {}}
- f = ioutils.open(image_vhd_path, 'rb')
- LOG.debug(
- _("Updating Glance image %(image_id)s with content from "
- "merged disk %(image_vhd_path)s"),
- locals())
+ LOG.debug(_("Updating Glance image %(image_id)s with content from "
+ "merged disk %(image_vhd_path)s"), locals())
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
- glance_image_service.update(context, image_id, image_metadata, f)
+ self._save_glance_image(context, name, image_vhd_path)
LOG.debug(_("Snapshot image %(image_id)s updated for VM "
- "%(instance_name)s"), locals())
+ "%(instance_name)s"), locals())
finally:
- LOG.debug(_("Removing snapshot %s"), name)
- (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
- snap_setting_data.path_())
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job_path)
- else:
- success = (ret_val == 0)
- if not success:
- raise vmutils.HyperVException(
- _('Failed to remove snapshot for VM %s') %
- instance_name)
- if f:
- f.close()
+ try:
+ LOG.debug(_("Removing snapshot %s"), name)
+ self._vmutils.remove_vm_snapshot(snapshot_path)
+ except Exception as ex:
+ LOG.exception(ex)
+ LOG.warning(_('Failed to remove snapshot for VM %s')
+ % instance_name)
if export_folder:
LOG.debug(_('Removing folder %s '), export_folder)
shutil.rmtree(export_folder)
diff --git a/nova/virt/hyperv/vhdutils.py b/nova/virt/hyperv/vhdutils.py
new file mode 100644
index 000000000..21c4b4a6d
--- /dev/null
+++ b/nova/virt/hyperv/vhdutils.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+if sys.platform == 'win32':
+ import wmi
+
+from nova.virt.hyperv import vmutils
+from xml.etree import ElementTree
+
+
+class VHDUtils(object):
+
+ def __init__(self):
+ self._vmutils = vmutils.VMUtils()
+ if sys.platform == 'win32':
+ self._conn = wmi.WMI(moniker='//./root/virtualization')
+
+ def create_differencing_vhd(self, path, parent_path):
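+        # A differencing (copy-on-write) VHD stores only the changes made
+        # relative to its parent image.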
+ image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+
+ (job_path, ret_val) = image_man_svc.CreateDifferencingVirtualHardDisk(
+ Path=path, ParentPath=parent_path)
+ self._vmutils.check_ret_val(ret_val, job_path)
+
+ def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
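+        # Force=True skips the parent consistency checks, needed here since
+        # the child VHD is relinked after being copied to a new location.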
+ image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+
+ (job_path, ret_val) = image_man_svc.ReconnectParentVirtualHardDisk(
+ ChildPath=child_vhd_path,
+ ParentPath=parent_vhd_path,
+ Force=True)
+ self._vmutils.check_ret_val(ret_val, job_path)
+
+ def merge_vhd(self, src_vhd_path, dest_vhd_path):
+ image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+
+ (job_path, ret_val) = image_man_svc.MergeVirtualHardDisk(
+ SourcePath=src_vhd_path,
+ DestinationPath=dest_vhd_path)
+ self._vmutils.check_ret_val(ret_val, job_path)
+
+ def get_vhd_parent_path(self, vhd_path):
+ image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+
+ (vhd_info,
+ job_path,
+ ret_val) = image_man_svc.GetVirtualHardDiskInfo(vhd_path)
+ self._vmutils.check_ret_val(ret_val, job_path)
+
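+        # The VHD info is returned as an XML fragment; the ParentPath
+        # property is present only for differencing disks.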
+ base_disk_path = None
+ et = ElementTree.fromstring(vhd_info)
+ for item in et.findall("PROPERTY"):
+ if item.attrib["NAME"] == "ParentPath":
+ base_disk_path = item.find("VALUE").text
+ break
+ return base_disk_path
diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py
index e01006eaa..cfe7c6a4c 100644
--- a/nova/virt/hyperv/vif.py
+++ b/nova/virt/hyperv/vif.py
@@ -15,17 +15,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import abc
-import sys
-import uuid
-
-# Check needed for unit testing on Unix
-if sys.platform == 'win32':
- import wmi
+import abc
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
+from nova.virt.hyperv import networkutils
from nova.virt.hyperv import vmutils
@@ -70,65 +65,17 @@ class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver):
def __init__(self):
self._vmutils = vmutils.VMUtils()
- self._conn = wmi.WMI(moniker='//./root/virtualization')
-
- def _find_external_network(self):
- """Find the vswitch that is connected to the physical nic.
- Assumes only one physical nic on the host
- """
- #If there are no physical nics connected to networks, return.
- LOG.debug(_("Attempting to bind NIC to %s ")
- % CONF.vswitch_name)
- if CONF.vswitch_name:
- LOG.debug(_("Attempting to bind NIC to %s ")
- % CONF.vswitch_name)
- bound = self._conn.Msvm_VirtualSwitch(
- ElementName=CONF.vswitch_name)
- else:
- LOG.debug(_("No vSwitch specified, attaching to default"))
- self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')
- if len(bound) == 0:
- return None
- if CONF.vswitch_name:
- return self._conn.Msvm_VirtualSwitch(
- ElementName=CONF.vswitch_name)[0]\
- .associators(wmi_result_class='Msvm_SwitchPort')[0]\
- .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
- else:
- return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\
- .associators(wmi_result_class='Msvm_SwitchPort')[0]\
- .associators(wmi_result_class='Msvm_VirtualSwitch')[0]
+ self._netutils = networkutils.NetworkUtils()
def plug(self, instance, vif):
- extswitch = self._find_external_network()
- if extswitch is None:
- raise vmutils.HyperVException(_('Cannot find vSwitch'))
+ vswitch_path = self._netutils.get_external_vswitch(
+ CONF.vswitch_name)
vm_name = instance['name']
-
- nic_data = self._conn.Msvm_SyntheticEthernetPortSettingData(
- ElementName=vif['id'])[0]
-
- switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
- #Create a port on the vswitch.
- (new_port, ret_val) = switch_svc.CreateSwitchPort(
- Name=str(uuid.uuid4()),
- FriendlyName=vm_name,
- ScopeOfResidence="",
- VirtualSwitch=extswitch.path_())
- if ret_val != 0:
- LOG.error(_('Failed creating a port on the external vswitch'))
- raise vmutils.HyperVException(_('Failed creating port for %s') %
- vm_name)
- ext_path = extswitch.path_()
- LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
- % locals())
-
- vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
- vm = vms[0]
-
- nic_data.Connection = [new_port]
- self._vmutils.modify_virt_resource(self._conn, nic_data, vm)
+ LOG.debug(_('Creating vswitch port for instance: %s') % vm_name)
+ vswitch_port = self._netutils.create_vswitch_port(vswitch_path,
+ vm_name)
+ self._vmutils.set_nic_connection(vm_name, vif['id'], vswitch_port)
def unplug(self, instance, vif):
#TODO(alepilotti) Not implemented
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 3d8958266..58c1fc66a 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
@@ -19,7 +20,6 @@
Management class for basic VM operations.
"""
import os
-import uuid
from nova.api.metadata import base as instance_metadata
from nova import exception
@@ -29,29 +29,31 @@ from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import configdrive
-from nova.virt.hyperv import baseops
from nova.virt.hyperv import constants
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
+from nova.virt import images
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.BoolOpt('limit_cpu_features',
- default=False,
- help='Required for live migration among '
- 'hosts with different CPU features'),
+ default=False,
+ help='Required for live migration among '
+ 'hosts with different CPU features'),
cfg.BoolOpt('config_drive_inject_password',
- default=False,
- help='Sets the admin password in the config drive image'),
+ default=False,
+ help='Sets the admin password in the config drive image'),
cfg.StrOpt('qemu_img_cmd',
default="qemu-img.exe",
help='qemu-img is used to convert between '
'different image types'),
cfg.BoolOpt('config_drive_cdrom',
- default=False,
- help='Attaches the Config Drive image as a cdrom drive '
- 'instead of a disk drive')
- ]
+ default=False,
+ help='Attaches the Config Drive image as a cdrom drive '
+ 'instead of a disk drive')
+]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts)
@@ -59,19 +61,20 @@ CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('network_api_class', 'nova.network')
-class VMOps(baseops.BaseOps):
+class VMOps(object):
_vif_driver_class_map = {
'nova.network.quantumv2.api.API':
- 'nova.virt.hyperv.vif.HyperVQuantumVIFDriver',
+ 'nova.virt.hyperv.vif.HyperVQuantumVIFDriver',
'nova.network.api.API':
- 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
+ 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
}
def __init__(self, volumeops):
- super(VMOps, self).__init__()
-
self._vmutils = vmutils.VMUtils()
+ self._vhdutils = vhdutils.VHDUtils()
+ self._pathutils = pathutils.PathUtils()
self._volumeops = volumeops
+ self._vif_driver = None
self._load_vif_driver_class()
def _load_vif_driver_class(self):
@@ -84,124 +87,106 @@ class VMOps(baseops.BaseOps):
CONF.network_api_class)
def list_instances(self):
- """Return the names of all the instances known to Hyper-V."""
- vms = [v.ElementName
- for v in self._conn.Msvm_ComputerSystem(['ElementName'],
- Caption="Virtual Machine")]
- return vms
+ return self._vmutils.list_instances()
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
- return self._get_info(instance['name'])
-
- def _get_info(self, instance_name):
- vm = self._vmutils.lookup(self._conn, instance_name)
- if vm is None:
- raise exception.InstanceNotFound(instance=instance_name)
- vm = self._conn.Msvm_ComputerSystem(
- ElementName=instance_name)[0]
- vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
- vmsettings = vm.associators(
- wmi_association_class='Msvm_SettingsDefineState',
- wmi_result_class='Msvm_VirtualSystemSettingData')
- settings_paths = [v.path_() for v in vmsettings]
- #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
- summary_info = vs_man_svc.GetSummaryInformation(
- [constants.VM_SUMMARY_NUM_PROCS,
- constants.VM_SUMMARY_ENABLED_STATE,
- constants.VM_SUMMARY_MEMORY_USAGE,
- constants.VM_SUMMARY_UPTIME],
- settings_paths)[1]
- info = summary_info[0]
-
- LOG.debug(_("hyperv vm state: %s"), info.EnabledState)
- state = constants.HYPERV_POWER_STATE[info.EnabledState]
- memusage = str(info.MemoryUsage)
- numprocs = str(info.NumberOfProcessors)
- uptime = str(info.UpTime)
-
- LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)d,"
- " mem=%(memusage)s, num_cpu=%(numprocs)s,"
- " uptime=%(uptime)s"), locals())
+ instance_name = instance['name']
+ if not self._vmutils.vm_exists(instance_name):
+ raise exception.InstanceNotFound(instance=instance)
+
+ info = self._vmutils.get_vm_summary_info(instance_name)
+
+ state = constants.HYPERV_POWER_STATE[info['EnabledState']]
return {'state': state,
- 'max_mem': info.MemoryUsage,
- 'mem': info.MemoryUsage,
- 'num_cpu': info.NumberOfProcessors,
- 'cpu_time': info.UpTime}
+ 'max_mem': info['MemoryUsage'],
+ 'mem': info['MemoryUsage'],
+ 'num_cpu': info['NumberOfProcessors'],
+ 'cpu_time': info['UpTime']}
def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info=None):
+ admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
- vm = self._vmutils.lookup(self._conn, instance['name'])
- if vm is not None:
- raise exception.InstanceExists(name=instance['name'])
+
+ instance_name = instance['name']
+ if self._vmutils.vm_exists(instance_name):
+ raise exception.InstanceExists(name=instance_name)
ebs_root = self._volumeops.volume_in_mapping(
self._volumeops.get_default_root_device(),
- block_device_info)
+ block_device_info)
         #If it is not a boot from volume spawn
if not (ebs_root):
#Fetch the file, assume it is a VHD file.
- vhdfile = self._vmutils.get_vhd_path(instance['name'])
+ vhdfile = self._pathutils.get_vhd_path(instance_name)
try:
- self._cache_image(fn=self._vmutils.fetch_image,
- context=context,
- target=vhdfile,
- fname=instance['image_ref'],
- image_id=instance['image_ref'],
- user=instance['user_id'],
- project=instance['project_id'],
- cow=CONF.use_cow_images)
+ self._cache_image(fn=self._fetch_image,
+ context=context,
+ target=vhdfile,
+ fname=instance['image_ref'],
+ image_id=instance['image_ref'],
+ user=instance['user_id'],
+ project=instance['project_id'],
+ cow=CONF.use_cow_images)
except Exception as exn:
LOG.exception(_('cache image failed: %s'), exn)
- self.destroy(instance)
+ raise
try:
- self._create_vm(instance)
+ self._vmutils.create_vm(instance_name,
+ instance['memory_mb'],
+ instance['vcpus'],
+ CONF.limit_cpu_features)
if not ebs_root:
- self._attach_ide_drive(instance['name'], vhdfile, 0, 0,
- constants.IDE_DISK)
+ self._vmutils.attach_ide_drive(instance_name,
+ vhdfile,
+ 0,
+ 0,
+ constants.IDE_DISK)
else:
self._volumeops.attach_boot_volume(block_device_info,
- instance['name'])
+ instance_name)
- #A SCSI controller for volumes connection is created
- self._create_scsi_controller(instance['name'])
+ self._vmutils.create_scsi_controller(instance_name)
for vif in network_info:
- self._create_nic(instance['name'], vif)
+ LOG.debug(_('Creating nic for instance: %s'), instance_name)
+ self._vmutils.create_nic(instance_name,
+ vif['id'],
+ vif['address'])
self._vif_driver.plug(instance, vif)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
- admin_password)
+ admin_password)
- LOG.debug(_('Starting VM %s '), instance['name'])
- self._set_vm_state(instance['name'], 'Enabled')
- LOG.info(_('Started VM %s '), instance['name'])
- except Exception as exn:
- LOG.exception(_('spawn vm failed: %s'), exn)
+ self._set_vm_state(instance_name,
+ constants.HYPERV_VM_STATE_ENABLED)
+ except Exception as ex:
+ LOG.exception(ex)
self.destroy(instance)
- raise exn
+ raise vmutils.HyperVException(_('Spawn instance failed'))
def _create_config_drive(self, instance, injected_files, admin_password):
if CONF.config_drive_format != 'iso9660':
-            vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
-                CONF.config_drive_format)
+            raise vmutils.HyperVException(
+                _('Invalid config_drive_format "%s"') %
+                CONF.config_drive_format)
+
+        LOG.info(_('Using config drive for instance: %s'),
+                 instance['name'])
extra_md = {}
if admin_password and CONF.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
- content=injected_files, extra_md=extra_md)
+ content=injected_files,
+ extra_md=extra_md)
- instance_path = self._vmutils.get_instance_path(
+ instance_path = self._pathutils.get_instance_path(
instance['name'])
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_('Creating config drive at %(path)s'),
@@ -218,7 +203,7 @@ class VMOps(baseops.BaseOps):
if not CONF.config_drive_cdrom:
drive_type = constants.IDE_DISK
configdrive_path = os.path.join(instance_path,
- 'configdrive.vhd')
+ 'configdrive.vhd')
utils.execute(CONF.qemu_img_cmd,
'convert',
'-f',
@@ -233,302 +218,88 @@ class VMOps(baseops.BaseOps):
drive_type = constants.IDE_DVD
configdrive_path = configdrive_path_iso
- self._attach_ide_drive(instance['name'], configdrive_path, 1, 0,
- drive_type)
-
- def _create_vm(self, instance):
- """Create a VM but don't start it."""
- vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ self._vmutils.attach_ide_drive(instance['name'], configdrive_path,
+ 1, 0, drive_type)
- vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
- vs_gs_data.ElementName = instance["name"]
- (job, ret_val) = vs_man_svc.DefineVirtualSystem(
- [], None, vs_gs_data.GetText_(1))[1:]
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job)
- else:
- success = (ret_val == 0)
-
- if not success:
- raise vmutils.HyperVException(_('Failed to create VM %s') %
- instance["name"])
-
- LOG.debug(_('Created VM %s...'), instance["name"])
- vm = self._conn.Msvm_ComputerSystem(ElementName=instance["name"])[0]
-
- vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
- vmsetting = [s for s in vmsettings
- if s.SettingType == 3][0] # avoid snapshots
- memsetting = vmsetting.associators(
- wmi_result_class='Msvm_MemorySettingData')[0]
- #No Dynamic Memory, so reservation, limit and quantity are identical.
- mem = long(str(instance['memory_mb']))
- memsetting.VirtualQuantity = mem
- memsetting.Reservation = mem
- memsetting.Limit = mem
-
- (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [memsetting.GetText_(1)])
- LOG.debug(_('Set memory for vm %s...'), instance["name"])
- procsetting = vmsetting.associators(
- wmi_result_class='Msvm_ProcessorSettingData')[0]
- vcpus = long(instance['vcpus'])
- procsetting.VirtualQuantity = vcpus
- procsetting.Reservation = vcpus
- procsetting.Limit = 100000 # static assignment to 100%
-
- if CONF.limit_cpu_features:
- procsetting.LimitProcessorFeatures = True
-
- (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [procsetting.GetText_(1)])
- LOG.debug(_('Set vcpus for vm %s...'), instance["name"])
-
- def _create_scsi_controller(self, vm_name):
- """Create an iscsi controller ready to mount volumes."""
- LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
- 'attaching') % locals())
- vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
- vm = vms[0]
- scsicontrldefault = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\
- AND InstanceID LIKE '%Default%'")[0]
- if scsicontrldefault is None:
- raise vmutils.HyperVException(_('Controller not found'))
- scsicontrl = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_ResourceAllocationSettingData', scsicontrldefault)
- scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
- scsiresource = self._vmutils.add_virt_resource(self._conn,
- scsicontrl, vm)
- if scsiresource is None:
- raise vmutils.HyperVException(
- _('Failed to add scsi controller to VM %s') %
- vm_name)
-
- def _get_ide_controller(self, vm, ctrller_addr):
- #Find the IDE controller for the vm.
- vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
- rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
- ctrller = [r for r in rasds
- if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
- and r.Address == str(ctrller_addr)]
- return ctrller
-
- def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
- drive_type=constants.IDE_DISK):
- """Create an IDE drive and attach it to the vm."""
- LOG.debug(_('Creating disk for %(vm_name)s by attaching'
- ' disk file %(path)s') % locals())
-
- vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
- vm = vms[0]
-
- ctrller = self._get_ide_controller(vm, ctrller_addr)
-
- if drive_type == constants.IDE_DISK:
- resSubType = 'Microsoft Synthetic Disk Drive'
- elif drive_type == constants.IDE_DVD:
- resSubType = 'Microsoft Synthetic DVD Drive'
-
- #Find the default disk drive object for the vm and clone it.
- drivedflt = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE '%(resSubType)s'\
- AND InstanceID LIKE '%%Default%%'" % locals())[0]
- drive = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_ResourceAllocationSettingData', drivedflt)
- #Set the IDE ctrller as parent.
- drive.Parent = ctrller[0].path_()
- drive.Address = drive_addr
- #Add the cloned disk drive object to the vm.
- new_resources = self._vmutils.add_virt_resource(self._conn,
- drive, vm)
- if new_resources is None:
- raise vmutils.HyperVException(
- _('Failed to add drive to VM %s') %
- vm_name)
- drive_path = new_resources[0]
- LOG.debug(_('New %(drive_type)s drive path is %(drive_path)s') %
- locals())
-
- if drive_type == constants.IDE_DISK:
- resSubType = 'Microsoft Virtual Hard Disk'
- elif drive_type == constants.IDE_DVD:
- resSubType = 'Microsoft Virtual CD/DVD Disk'
-
- #Find the default VHD disk object.
- drivedefault = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE '%(resSubType)s' AND \
- InstanceID LIKE '%%Default%%' " % locals())[0]
-
- #Clone the default and point it to the image file.
- res = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_ResourceAllocationSettingData', drivedefault)
- #Set the new drive as the parent.
- res.Parent = drive_path
- res.Connection = [path]
-
- #Add the new vhd object as a virtual hard disk to the vm.
- new_resources = self._vmutils.add_virt_resource(self._conn, res, vm)
- if new_resources is None:
- raise vmutils.HyperVException(
- _('Failed to add %(drive_type)s image to VM %(vm_name)s') %
- locals())
- LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') %
- locals())
-
- def _create_nic(self, vm_name, vif):
- """Create a (synthetic) nic and attach it to the vm."""
- LOG.debug(_('Creating nic for %s '), vm_name)
-
- #Create a new nic
- syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
- default_nic_data = [n for n in syntheticnics_data
- if n.InstanceID.rfind('Default') > 0]
- new_nic_data = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_SyntheticEthernetPortSettingData',
- default_nic_data[0])
-
- #Configure the nic
- new_nic_data.ElementName = vif['id']
- new_nic_data.Address = vif['address'].replace(':', '')
- new_nic_data.StaticMacAddress = 'True'
- new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
-
- #Add the new nic to the vm
- vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
- vm = vms[0]
-
- new_resources = self._vmutils.add_virt_resource(self._conn,
- new_nic_data, vm)
- if new_resources is None:
- raise vmutils.HyperVException(_('Failed to add nic to VM %s') %
- vm_name)
- LOG.info(_("Created nic for %s "), vm_name)
+ def destroy(self, instance, network_info=None, cleanup=True,
+ destroy_disks=True):
+ instance_name = instance['name']
+ LOG.debug(_("Got request to destroy instance: %s"), instance_name)
+ try:
+ if self._vmutils.vm_exists(instance_name):
+ volumes_drives_list = self._vmutils.destroy_vm(instance_name,
+ destroy_disks)
+ #Disconnect volumes
+ for volume_drive in volumes_drives_list:
+ self._volumeops.disconnect_volume(volume_drive)
+ else:
+ LOG.debug(_("Instance not found: %s"), instance_name)
+ except Exception as ex:
+ LOG.exception(ex)
+ raise vmutils.HyperVException(_('Failed to destroy instance: %s') %
+ instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
- vm = self._vmutils.lookup(self._conn, instance['name'])
- if vm is None:
- raise exception.InstanceNotFound(instance_id=instance["id"])
- self._set_vm_state(instance['name'], 'Reboot')
-
- def destroy(self, instance, network_info=None, cleanup=True,
- destroy_disks=True):
- """Destroy the VM. Also destroy the associated VHD disk files."""
- LOG.debug(_("Got request to destroy vm %s"), instance['name'])
- vm = self._vmutils.lookup(self._conn, instance['name'])
- if vm is None:
- return
- vm = self._conn.Msvm_ComputerSystem(ElementName=instance['name'])[0]
- vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
- #Stop the VM first.
- self._set_vm_state(instance['name'], 'Disabled')
- vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
- rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
- disks = [r for r in rasds
- if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
- disk_files = []
- volumes = [r for r in rasds
- if r.ResourceSubType == 'Microsoft Physical Disk Drive']
- volumes_drives_list = []
- #collect the volumes information before destroying the VM.
- for volume in volumes:
- hostResources = volume.HostResource
- drive_path = hostResources[0]
- #Appending the Msvm_Disk path
- volumes_drives_list.append(drive_path)
- #Collect disk file information before destroying the VM.
- for disk in disks:
- disk_files.extend([c for c in disk.Connection])
- #Nuke the VM. Does not destroy disks.
- (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job)
- elif ret_val == 0:
- success = True
- if not success:
- raise vmutils.HyperVException(_('Failed to destroy vm %s') %
- instance['name'])
- if destroy_disks:
- #Disconnect volumes
- for volume_drive in volumes_drives_list:
- self._volumeops.disconnect_volume(volume_drive)
- #Delete associated vhd disk files.
- for disk in disk_files:
- vhdfile = self._conn_cimv2.query(
- "Select * from CIM_DataFile where Name = '" +
- disk.replace("'", "''") + "'")[0]
- LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s")
- % {'vhdfile': vhdfile, 'name': instance['name']})
- vhdfile.Delete()
+ LOG.debug(_("reboot instance"), instance=instance)
+ self._set_vm_state(instance['name'],
+ constants.HYPERV_VM_STATE_REBOOT)
def pause(self, instance):
"""Pause VM instance."""
LOG.debug(_("Pause instance"), instance=instance)
- self._set_vm_state(instance["name"], 'Paused')
+ self._set_vm_state(instance["name"],
+ constants.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug(_("Unpause instance"), instance=instance)
- self._set_vm_state(instance["name"], 'Enabled')
+ self._set_vm_state(instance["name"],
+ constants.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
-        print instance
LOG.debug(_("Suspend instance"), instance=instance)
- self._set_vm_state(instance["name"], 'Suspended')
+ self._set_vm_state(instance["name"],
+ constants.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug(_("Resume instance"), instance=instance)
- self._set_vm_state(instance["name"], 'Enabled')
+ self._set_vm_state(instance["name"],
+ constants.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance):
"""Power off the specified instance."""
LOG.debug(_("Power off instance"), instance=instance)
- self._set_vm_state(instance["name"], 'Disabled')
+ self._set_vm_state(instance["name"],
+ constants.HYPERV_VM_STATE_DISABLED)
def power_on(self, instance):
"""Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
- self._set_vm_state(instance["name"], 'Enabled')
+ self._set_vm_state(instance["name"],
+ constants.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, vm_name, req_state):
- """Set the desired state of the VM."""
- vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
- if len(vms) == 0:
- return False
- (job, ret_val) = vms[0].RequestStateChange(
- constants.REQ_POWER_STATE[req_state])
- success = False
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job)
- elif ret_val == 0:
- success = True
- elif ret_val == 32775:
- #Invalid state for current operation. Typically means it is
- #already in the state requested
- success = True
- if success:
- LOG.info(_("Successfully changed vm state of %(vm_name)s"
- " to %(req_state)s") % locals())
- else:
+ try:
+ self._vmutils.set_vm_state(vm_name, req_state)
+ LOG.debug(_("Successfully changed state of VM %(vm_name)s"
+ " to: %(req_state)s") % locals())
+ except Exception as ex:
+ LOG.exception(ex)
msg = _("Failed to change vm state of %(vm_name)s"
" to %(req_state)s") % locals()
- LOG.error(msg)
raise vmutils.HyperVException(msg)
- def _cache_image(self, fn, target, fname, cow=False, Size=None,
- *args, **kwargs):
- """Wrapper for a method that creates an image that caches the image.
+ def _fetch_image(self, target, context, image_id, user, project,
+ *args, **kwargs):
+ images.fetch(context, image_id, target, user, project)
+
+ def _cache_image(self, fn, target, fname, cow=False, size=None,
+ *args, **kwargs):
+ """Wrapper for a method that creates and caches an image.
This wrapper will save the image into a common store and create a
copy for use by the hypervisor.
@@ -543,32 +314,23 @@ class VMOps(baseops.BaseOps):
"""
@lockutils.synchronized(fname, 'nova-')
def call_if_not_exists(path, fn, *args, **kwargs):
- if not os.path.exists(path):
- fn(target=path, *args, **kwargs)
+ if not os.path.exists(path):
+ fn(target=path, *args, **kwargs)
- if not os.path.exists(target):
- LOG.debug(_("use_cow_image:%s"), cow)
+ if not self._pathutils.vhd_exists(target):
+ LOG.debug(_("Use CoW image: %s"), cow)
if cow:
- base = self._vmutils.get_base_vhd_path(fname)
- call_if_not_exists(base, fn, *args, **kwargs)
-
- image_service = self._conn.query(
- "Select * from Msvm_ImageManagementService")[0]
- (job, ret_val) = \
- image_service.CreateDifferencingVirtualHardDisk(
- Path=target, ParentPath=base)
- LOG.debug(
- "Creating difference disk: JobID=%s, Source=%s, Target=%s",
- job, base, target)
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self._vmutils.check_job_status(job)
- else:
- success = (ret_val == 0)
-
- if not success:
+ parent_path = self._pathutils.get_base_vhd_path(fname)
+ call_if_not_exists(parent_path, fn, *args, **kwargs)
+
+ LOG.debug(_("Creating differencing VHD. Parent: "
+ "%(parent_path)s, Target: %(target)s") % locals())
+ try:
+ self._vhdutils.create_differencing_vhd(target, parent_path)
+ except Exception as ex:
+ LOG.exception(ex)
raise vmutils.HyperVException(
- _('Failed to create Difference Disk from '
- '%(base)s to %(target)s') % locals())
-
+ _('Failed to create a differencing disk from '
+ '%(parent_path)s to %(target)s') % locals())
else:
call_if_not_exists(target, fn, *args, **kwargs)
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index d899f977d..876153902 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
@@ -16,24 +17,20 @@
# under the License.
"""
-Utility class for VM related operations.
+Utility class for VM related operations on Hyper-V.
"""
-import os
-import shutil
import sys
import time
import uuid
+if sys.platform == 'win32':
+ import wmi
+
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
-from nova.virt import images
-
-# Check needed for unit testing on Unix
-if sys.platform == 'win32':
- import wmi
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -45,19 +42,342 @@ class HyperVException(exception.NovaException):
class VMUtils(object):
- def lookup(self, conn, i):
- vms = conn.Msvm_ComputerSystem(ElementName=i)
+
+ def __init__(self):
+ if sys.platform == 'win32':
+ self._conn = wmi.WMI(moniker='//./root/virtualization')
+ self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
+
+ def list_instances(self):
+ """Return the names of all the instances known to Hyper-V."""
+ vm_names = [v.ElementName
+ for v in self._conn.Msvm_ComputerSystem(['ElementName'],
+ Caption="Virtual Machine")]
+ return vm_names
+
+ def get_vm_summary_info(self, vm_name):
+ vm = self._lookup_vm_check(vm_name)
+
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ vmsettings = vm.associators(
+ wmi_association_class='Msvm_SettingsDefineState',
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ settings_paths = [v.path_() for v in vmsettings]
+ #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
+ (ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
+ [constants.VM_SUMMARY_NUM_PROCS,
+ constants.VM_SUMMARY_ENABLED_STATE,
+ constants.VM_SUMMARY_MEMORY_USAGE,
+ constants.VM_SUMMARY_UPTIME],
+ settings_paths)
+ if ret_val:
+ raise HyperVException(_('Cannot get VM summary data for: %s')
+ % vm_name)
+
+ si = summary_info[0]
+ memory_usage = None
+ if si.MemoryUsage is not None:
+ memory_usage = long(si.MemoryUsage)
+ up_time = None
+ if si.UpTime is not None:
+ up_time = long(si.UpTime)
+
+ summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
+ 'EnabledState': si.EnabledState,
+ 'MemoryUsage': memory_usage,
+ 'UpTime': up_time}
+ return summary_info_dict
+
+ def _lookup_vm_check(self, vm_name):
+ vm = self._lookup_vm(vm_name)
+ if not vm:
+ raise HyperVException(_('VM not found: %s') % vm_name)
+ return vm
+
+ def _lookup_vm(self, vm_name):
+ vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
- raise HyperVException(_('duplicate name found: %s') % i)
+ raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
- return vms[0].ElementName
+ return vms[0]
+
+ def vm_exists(self, vm_name):
+ return self._lookup_vm(vm_name) is not None
+
+ def _get_vm_setting_data(self, vm):
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ # Avoid snapshots
+ return [s for s in vmsettings if s.SettingType == 3][0]
+
+ def _set_vm_memory(self, vm, vmsetting, memory_mb):
+ memsetting = vmsetting.associators(
+ wmi_result_class='Msvm_MemorySettingData')[0]
+ #No Dynamic Memory, so reservation, limit and quantity are identical.
+ mem = long(memory_mb)
+ memsetting.VirtualQuantity = mem
+ memsetting.Reservation = mem
+ memsetting.Limit = mem
+
+ self._modify_virt_resource(memsetting, vm.path_())
+
+ def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
+ procsetting = vmsetting.associators(
+ wmi_result_class='Msvm_ProcessorSettingData')[0]
+ vcpus = long(vcpus_num)
+ procsetting.VirtualQuantity = vcpus
+ procsetting.Reservation = vcpus
+ procsetting.Limit = 100000 # static assignment to 100%
+ procsetting.LimitProcessorFeatures = limit_cpu_features
+
+ self._modify_virt_resource(procsetting, vm.path_())
+
+ def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features):
+ """Creates a VM."""
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+
+ vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
+ vs_gs_data.ElementName = vm_name
+
+ LOG.debug(_('Creating VM %s'), vm_name)
+ (job_path,
+ ret_val) = vs_man_svc.DefineVirtualSystem([], None,
+ vs_gs_data.GetText_(1))[1:]
+ self.check_ret_val(ret_val, job_path)
+
+ vm = self._lookup_vm_check(vm_name)
+ vmsetting = self._get_vm_setting_data(vm)
+
+ LOG.debug(_('Setting memory for vm %s'), vm_name)
+ self._set_vm_memory(vm, vmsetting, memory_mb)
+
+ LOG.debug(_('Set vCPUs for vm %s'), vm_name)
+ self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
+
+ def get_vm_iscsi_controller(self, vm_name):
+ vm = self._lookup_vm_check(vm_name)
+
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ res = [r for r in rasds
+ if r.ResourceSubType ==
+ 'Microsoft Synthetic SCSI Controller'][0]
+ return res.path_()
+
+ def _get_vm_ide_controller(self, vm, ctrller_addr):
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ return [r for r in rasds
+ if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
+ and r.Address == str(ctrller_addr)][0].path_()
+
+ def get_vm_ide_controller(self, vm_name, ctrller_addr):
+ vm = self._lookup_vm_check(vm_name)
+ return self._get_vm_ide_controller(vm, ctrller_addr)
+
+ def get_attached_disks_count(self, scsi_controller_path):
+ volumes = self._conn.query("SELECT * FROM "
+ "Msvm_ResourceAllocationSettingData "
+ "WHERE ResourceSubType LIKE "
+ "'Microsoft Physical Disk Drive' "
+ "AND Parent = '%s'" %
+ scsi_controller_path.replace("'", "''"))
+ return len(volumes)
+
+ def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
+ drive_type=constants.IDE_DISK):
+ """Create an IDE drive and attach it to the vm."""
+
+ vm = self._lookup_vm_check(vm_name)
+
+ ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
+
+ if drive_type == constants.IDE_DISK:
+ res_sub_type = 'Microsoft Synthetic Disk Drive'
+ elif drive_type == constants.IDE_DVD:
+ res_sub_type = 'Microsoft Synthetic DVD Drive'
+
+ #Find the default disk drive object for the vm and clone it.
+ drivedflt = self._conn.query("SELECT * FROM "
+ "Msvm_ResourceAllocationSettingData "
+ "WHERE ResourceSubType LIKE "
+ "'%(res_sub_type)s' AND InstanceID LIKE "
+ "'%%Default%%'" % locals())[0]
+ drive = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData',
+ drivedflt)
+ #Set the IDE ctrller as parent.
+ drive.Parent = ctrller_path
+ drive.Address = drive_addr
+ #Add the cloned disk drive object to the vm.
+ new_resources = self._add_virt_resource(drive, vm.path_())
+ drive_path = new_resources[0]
+
+ if drive_type == constants.IDE_DISK:
+ res_sub_type = 'Microsoft Virtual Hard Disk'
+ elif drive_type == constants.IDE_DVD:
+ res_sub_type = 'Microsoft Virtual CD/DVD Disk'
+
+ #Find the default VHD disk object.
+ drivedefault = self._conn.query("SELECT * FROM "
+ "Msvm_ResourceAllocationSettingData "
+ "WHERE ResourceSubType LIKE "
+ "'%(res_sub_type)s' AND "
+ "InstanceID LIKE '%%Default%%'"
+ % locals())[0]
+
+ #Clone the default and point it to the image file.
+ res = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData',
+ drivedefault)
+ #Set the new drive as the parent.
+ res.Parent = drive_path
+ res.Connection = [path]
+
+ #Add the new vhd object as a virtual hard disk to the vm.
+ self._add_virt_resource(res, vm.path_())
+
+ def create_scsi_controller(self, vm_name):
+ """Create an iscsi controller ready to mount volumes."""
+
+ vm = self._lookup_vm_check(vm_name)
+ scsicontrldflt = self._conn.query("SELECT * FROM "
+ "Msvm_ResourceAllocationSettingData "
+ "WHERE ResourceSubType = 'Microsoft "
+ "Synthetic SCSI Controller' AND "
+ "InstanceID LIKE '%Default%'")[0]
+ if scsicontrldflt is None:
+ raise HyperVException(_('Controller not found'))
+ scsicontrl = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData',
+ scsicontrldflt)
+ scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
+        self._add_virt_resource(scsicontrl, vm.path_())
+
-    def check_job_status(self, jobpath):
-        """Poll WMI job state for completion."""
-        job_wmi_path = jobpath.replace('\\', '/')
+    def attach_volume_to_controller(self, vm_name, controller_path, address,
+                                    mounted_disk_path):
+        """Attach a volume to a controller."""
+        vm = self._lookup_vm_check(vm_name)
+
+ diskdflt = self._conn.query("SELECT * FROM "
+ "Msvm_ResourceAllocationSettingData "
+ "WHERE ResourceSubType LIKE "
+ "'Microsoft Physical Disk Drive' "
+ "AND InstanceID LIKE '%Default%'")[0]
+ diskdrive = self._clone_wmi_obj('Msvm_ResourceAllocationSettingData',
+ diskdflt)
+ diskdrive.Address = address
+ diskdrive.Parent = controller_path
+ diskdrive.HostResource = [mounted_disk_path]
+ self._add_virt_resource(diskdrive, vm.path_())
+
+ def set_nic_connection(self, vm_name, nic_name, vswitch_port):
+ nic_data = self._get_nic_data_by_name(nic_name)
+ nic_data.Connection = [vswitch_port]
+
+ vm = self._lookup_vm_check(vm_name)
+ self._modify_virt_resource(nic_data, vm.path_())
+
+ def _get_nic_data_by_name(self, name):
+ return self._conn.Msvm_SyntheticEthernetPortSettingData(
+ ElementName=name)[0]
+
+ def create_nic(self, vm_name, nic_name, mac_address):
+ """Create a (synthetic) nic and attach it to the vm."""
+ #Create a new nic
+ syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData()
+ default_nic_data = [n for n in syntheticnics_data
+ if n.InstanceID.rfind('Default') > 0]
+ new_nic_data = self._clone_wmi_obj(
+ 'Msvm_SyntheticEthernetPortSettingData', default_nic_data[0])
+
+ #Configure the nic
+ new_nic_data.ElementName = nic_name
+ new_nic_data.Address = mac_address.replace(':', '')
+ new_nic_data.StaticMacAddress = 'True'
+ new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
+
+ #Add the new nic to the vm
+ vm = self._lookup_vm_check(vm_name)
+
+ self._add_virt_resource(new_nic_data, vm.path_())
+
+ def set_vm_state(self, vm_name, req_state):
+ """Set the desired state of the VM."""
+
+ vm = self._lookup_vm_check(vm_name)
+ (job_path, ret_val) = vm.RequestStateChange(req_state)
+ #Invalid state for current operation (32775) typically means that
+ #the VM is already in the state requested
+ self.check_ret_val(ret_val, job_path, [0, 32775])
+ LOG.debug(_("Successfully changed vm state of %(vm_name)s"
+ " to %(req_state)s") % locals())
+
+ def destroy_vm(self, vm_name, destroy_disks=True):
+ """Destroy the VM. Also destroy the associated VHD disk files."""
+
+ vm = self._lookup_vm_check(vm_name)
+
+ #Stop the VM first.
+ self.set_vm_state(vm_name, constants.HYPERV_VM_STATE_DISABLED)
+
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ vmsettings = vm.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ rasds = vmsettings[0].associators(
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
+ disk_resources = [r for r in rasds
+ if r.ResourceSubType ==
+ 'Microsoft Virtual Hard Disk']
+ volume_resources = [r for r in rasds
+ if r.ResourceSubType ==
+ 'Microsoft Physical Disk Drive']
+
+ #Collect volumes information before destroying the VM.
+ volumes_drives_list = []
+ for volume_resource in volume_resources:
+ drive_path = volume_resource.HostResource[0]
+ #Appending the Msvm_Disk path
+ volumes_drives_list.append(drive_path)
+
+ #Collect disk file information before destroying the VM.
+ disk_files = []
+ for disk_resource in disk_resources:
+ disk_files.extend([c for c in disk_resource.Connection])
+
+ #Remove the VM. Does not destroy disks.
+ (job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
+ self.check_ret_val(ret_val, job_path)
+
+ if destroy_disks:
+ #Delete associated vhd disk files.
+ for disk in disk_files:
+ LOG.debug(_("Deleting disk file: %(disk)s") % locals())
+ self._delete_file(disk)
+
+ return volumes_drives_list
+
+ def _delete_file(self, path):
+ f = self._conn_cimv2.query("Select * from CIM_DataFile where "
+ "Name = '%s'" % path.replace("'", "''"))[0]
+ f.Delete()
+
+ def check_ret_val(self, ret_val, job_path, success_values=[0]):
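+        #WMI_JOB_STATUS_STARTED (4096) means that the operation continues
+        #asynchronously in a WMI job, which is polled until it completes.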
+ if ret_val == constants.WMI_JOB_STATUS_STARTED:
+ self._wait_for_job(job_path)
+ elif ret_val not in success_values:
+ raise HyperVException(_('Operation failed with return value: %s')
+ % ret_val)
+
+ def _wait_for_job(self, job_path):
+ """Poll WMI job state and wait for completion."""
+
+ job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
while job.JobState == constants.WMI_JOB_STATE_RUNNING:
@@ -69,54 +389,30 @@ class VMUtils(object):
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
- LOG.debug(_("WMI job failed with status %(job_state)d. "
- "Error details: %(err_sum_desc)s - %(err_desc)s - "
- "Error code: %(err_code)d") % locals())
+ raise HyperVException(_("WMI job failed with status "
+ "%(job_state)d. Error details: "
+ "%(err_sum_desc)s - %(err_desc)s - "
+ "Error code: %(err_code)d")
+ % locals())
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
- LOG.debug(_("WMI job failed with status %(job_state)d. "
- "Error details: %(error)s") % locals())
+ raise HyperVException(_("WMI job failed with status "
+ "%(job_state)d. Error details: "
+ "%(error)s") % locals())
else:
- LOG.debug(_("WMI job failed with status %(job_state)d. "
- "No error description available") % locals())
- return False
+ raise HyperVException(_("WMI job failed with status "
+ "%(job_state)d. No error "
+ "description available")
+ % locals())
desc = job.Description
elap = job.ElapsedTime
LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s")
- % locals())
- return True
-
- def get_instance_path(self, instance_name):
- instance_path = os.path.join(CONF.instances_path, instance_name)
- if not os.path.exists(instance_path):
- LOG.debug(_('Creating folder %s '), instance_path)
- os.makedirs(instance_path)
- return instance_path
-
- def get_vhd_path(self, instance_name):
- instance_path = self.get_instance_path(instance_name)
- return os.path.join(instance_path, instance_name + ".vhd")
-
- def get_base_vhd_path(self, image_name):
- base_dir = os.path.join(CONF.instances_path, '_base')
- if not os.path.exists(base_dir):
- os.makedirs(base_dir)
- return os.path.join(base_dir, image_name + ".vhd")
-
- def make_export_path(self, instance_name):
- export_folder = os.path.join(CONF.instances_path, "export",
- instance_name)
- if os.path.isdir(export_folder):
- LOG.debug(_('Removing existing folder %s '), export_folder)
- shutil.rmtree(export_folder)
- LOG.debug(_('Creating folder %s '), export_folder)
- os.makedirs(export_folder)
- return export_folder
-
- def clone_wmi_obj(self, conn, wmi_class, wmi_obj):
+ % locals())
+
+ def _clone_wmi_obj(self, wmi_class, wmi_obj):
"""Clone a WMI object."""
- cl = conn.__getattr__(wmi_class) # get the class
+ cl = getattr(self._conn, wmi_class) # get the class
newinst = cl.new()
#Copy the properties from the original.
for prop in wmi_obj._properties:
@@ -125,51 +421,78 @@ class VMUtils(object):
strguid.append(str(uuid.uuid4()))
newinst.Properties_.Item(prop).Value = strguid
else:
- newinst.Properties_.Item(prop).Value = \
- wmi_obj.Properties_.Item(prop).Value
+ prop_value = wmi_obj.Properties_.Item(prop).Value
+ newinst.Properties_.Item(prop).Value = prop_value
+
return newinst
- def add_virt_resource(self, conn, res_setting_data, target_vm):
+ def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
- vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
- (job, new_resources, ret_val) = vs_man_svc.\
- AddVirtualSystemResources([res_setting_data.GetText_(1)],
- target_vm.path_())
- success = True
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self.check_job_status(job)
- else:
- success = (ret_val == 0)
- if success:
- return new_resources
- else:
- return None
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ res_xml = [res_setting_data.GetText_(1)]
+ (job_path,
+ new_resources,
+ ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
+ self.check_ret_val(ret_val, job_path)
+ return new_resources
- def modify_virt_resource(self, conn, res_setting_data, target_vm):
+ def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
- vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
- (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ (job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
- ComputerSystem=target_vm.path_())
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self.check_job_status(job)
- else:
- success = (ret_val == 0)
- return success
+ ComputerSystem=vm_path)
+ self.check_ret_val(ret_val, job_path)
- def remove_virt_resource(self, conn, res_setting_data, target_vm):
+ def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
- vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
- (job, ret_val) = vs_man_svc.\
- RemoveVirtualSystemResources([res_setting_data.path_()],
- target_vm.path_())
- success = True
- if ret_val == constants.WMI_JOB_STATUS_STARTED:
- success = self.check_job_status(job)
- else:
- success = (ret_val == 0)
- return success
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ res_path = [res_setting_data.path_()]
+ (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
+ vm_path)
+ self.check_ret_val(ret_val, job_path)
+
+ def take_vm_snapshot(self, vm_name):
+ vm = self._lookup_vm_check(vm_name)
+
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+
+ (job_path, ret_val,
+ snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
+ self.check_ret_val(ret_val, job_path)
+
+ job_wmi_path = job_path.replace('\\', '/')
+ job = wmi.WMI(moniker=job_wmi_path)
+ snp_setting_data = job.associators(
+ wmi_result_class='Msvm_VirtualSystemSettingData')[0]
+ return snp_setting_data.path_()
+
+ def remove_vm_snapshot(self, snapshot_path):
+ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+
+ (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
+ snapshot_path)
+ self.check_ret_val(ret_val, job_path)
+
+ def detach_vm_disk(self, vm_name, disk_path):
+ vm = self._lookup_vm_check(vm_name)
+ physical_disk = self._get_mounted_disk_resource_from_path(
+ disk_path)
+ self._remove_virt_resource(physical_disk, vm.path_())
+
+ def _get_mounted_disk_resource_from_path(self, disk_path):
+ physical_disks = self._conn.query("SELECT * FROM "
+ "Msvm_ResourceAllocationSettingData"
+ " WHERE ResourceSubType = "
+ "'Microsoft Physical Disk Drive'")
+ for physical_disk in physical_disks:
+ if physical_disk.HostResource:
+ if physical_disk.HostResource[0].lower() == disk_path.lower():
+ return physical_disk
- def fetch_image(self, target, context, image_id, user, project,
- *args, **kwargs):
- images.fetch(context, image_id, target, user, project)
+ def get_mounted_disk_by_drive_number(self, device_number):
+ mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
+ "WHERE DriveNumber=" +
+ str(device_number))
+ if len(mounted_disks):
+ return mounted_disks[0].path_()
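The refactored methods above no longer return booleans; every WMI return value
is funneled through check_ret_val(), which raises HyperVException on failure.
A minimal sketch of that helper, assuming the names used in this patch
(constants.WMI_JOB_STATUS_STARTED, check_job_status); the actual body lives
outside this hunk:

    def check_ret_val(self, ret_val, job_path):
        # Async case: the call started a WMI job; poll it to completion.
        # check_job_status() (shown above) raises HyperVException on failure.
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            self.check_job_status(job_path)
        # Sync case: any non-zero return value is an error.
        elif ret_val != 0:
            raise HyperVException(_('Operation failed with return value: %s')
                                  % ret_val)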
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index b69cf7bf1..a7e56b739 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Pedro Navarro Perez
+# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -20,210 +21,140 @@ Management class for Storage-related functions (attach, detach, etc).
"""
import time
-from nova import block_device
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
-from nova.virt.hyperv import baseops
+from nova.virt.hyperv import hostutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutils
-from nova.virt.hyperv import volumeutilsV2
+from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
hyper_volumeops_opts = [
cfg.IntOpt('hyperv_attaching_volume_retry_count',
- default=10,
- help='The number of times we retry on attaching volume '),
+ default=10,
+               help='The number of times to retry attaching a volume'),
cfg.IntOpt('hyperv_wait_between_attach_retry',
- default=5,
- help='The seconds to wait between a volume attachment attempt'),
+ default=5,
+               help='The seconds to wait between volume '
+                    'attachment attempts'),
cfg.BoolOpt('force_volumeutils_v1',
- default=False,
- help='Force volumeutils v1'),
- ]
+ default=False,
+ help='Force volumeutils v1'),
+]
CONF = cfg.CONF
CONF.register_opts(hyper_volumeops_opts)
CONF.import_opt('my_ip', 'nova.netconf')
-class VolumeOps(baseops.BaseOps):
+class VolumeOps(object):
"""
Management class for Volume-related tasks
"""
def __init__(self):
- super(VolumeOps, self).__init__()
-
+ self._hostutils = hostutils.HostUtils()
self._vmutils = vmutils.VMUtils()
- self._driver = driver
- self._block_device = block_device
- self._time = time
+ self._volutils = self._get_volume_utils()
self._initiator = None
self._default_root_device = 'vda'
- self._attaching_volume_retry_count = \
- CONF.hyperv_attaching_volume_retry_count
- self._wait_between_attach_retry = \
- CONF.hyperv_wait_between_attach_retry
- self._volutils = self._get_volume_utils()
def _get_volume_utils(self):
- if(not CONF.force_volumeutils_v1) and \
- (self._get_hypervisor_version() >= 6.2):
- return volumeutilsV2.VolumeUtilsV2(
- self._conn_storage, self._conn_wmi)
+ if(not CONF.force_volumeutils_v1 and
+ self._hostutils.get_windows_version() >= 6.2):
+ return volumeutilsv2.VolumeUtilsV2()
else:
- return volumeutils.VolumeUtils(self._conn_wmi)
-
- def _get_hypervisor_version(self):
- """Get hypervisor version.
- :returns: hypervisor version (ex. 12003)
- """
- version = self._conn_cimv2.Win32_OperatingSystem()[0]\
- .Version
- LOG.info(_('Windows version: %s ') % version)
- return version
+ return volumeutils.VolumeUtils()
def attach_boot_volume(self, block_device_info, vm_name):
"""Attach the boot volume to the IDE controller."""
+
LOG.debug(_("block device info: %s"), block_device_info)
- ebs_root = self._driver.block_device_info_get_mapping(
+ ebs_root = driver.block_device_info_get_mapping(
block_device_info)[0]
+
connection_info = ebs_root['connection_info']
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
self._volutils.login_storage_target(target_lun, target_iqn,
- target_portal)
+ target_portal)
try:
#Getting the mounted disk
- mounted_disk = self._get_mounted_disk_from_lun(target_iqn,
- target_lun)
- #Attach to IDE controller
+ mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
+ target_lun)
#Find the IDE controller for the vm.
- vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
- vm = vms[0]
- vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
- rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
- ctrller = [r for r in rasds
- if r.ResourceSubType == 'Microsoft Emulated IDE Controller'
- and r.Address == "0"]
+ ctrller_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
#Attaching to the same slot as the VHD disk file
- self._attach_volume_to_controller(ctrller, 0, mounted_disk, vm)
+ self._vmutils.attach_volume_to_controller(vm_name,
+ ctrller_path, 0,
+ mounted_disk_path)
except Exception as exn:
LOG.exception(_('Attach boot from volume failed: %s'), exn)
self._volutils.logout_storage_target(target_iqn)
raise vmutils.HyperVException(
- _('Unable to attach boot volume to instance %s')
- % vm_name)
+ _('Unable to attach boot volume to instance %s') % vm_name)
def volume_in_mapping(self, mount_device, block_device_info):
return self._volutils.volume_in_mapping(mount_device,
- block_device_info)
+ block_device_info)
- def attach_volume(self, connection_info, instance_name, mountpoint):
+ def attach_volume(self, connection_info, instance_name):
"""Attach a volume to the SCSI controller."""
- LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
- " %(mountpoint)s") % locals())
+ LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s")
+ % locals())
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
self._volutils.login_storage_target(target_lun, target_iqn,
- target_portal)
+ target_portal)
try:
#Getting the mounted disk
- mounted_disk = self._get_mounted_disk_from_lun(target_iqn,
- target_lun)
+ mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
+ target_lun)
#Find the SCSI controller for the vm
- vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name)
- vm = vms[0]
- vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
- rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
- ctrller = [r for r in rasds
- if r.ResourceSubType == 'Microsoft Synthetic SCSI Controller']
- self._attach_volume_to_controller(
- ctrller, self._get_free_controller_slot(ctrller[0]),
- mounted_disk, vm)
+ ctrller_path = self._vmutils.get_vm_iscsi_controller(instance_name)
+
+ slot = self._get_free_controller_slot(ctrller_path)
+ self._vmutils.attach_volume_to_controller(instance_name,
+ ctrller_path,
+ slot,
+ mounted_disk_path)
except Exception as exn:
LOG.exception(_('Attach volume failed: %s'), exn)
self._volutils.logout_storage_target(target_iqn)
- raise vmutils.HyperVException(
- _('Unable to attach volume to instance %s')
- % instance_name)
+ raise vmutils.HyperVException(_('Unable to attach volume '
+ 'to instance %s') % instance_name)
- def _attach_volume_to_controller(self, controller, address, mounted_disk,
- instance):
- """Attach a volume to a controller."""
- #Find the default disk drive object for the vm and clone it.
- diskdflt = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
- AND InstanceID LIKE '%Default%'")[0]
- diskdrive = self._vmutils.clone_wmi_obj(self._conn,
- 'Msvm_ResourceAllocationSettingData', diskdflt)
- diskdrive.Address = address
- diskdrive.Parent = controller[0].path_()
- diskdrive.HostResource = [mounted_disk[0].path_()]
- new_resources = self._vmutils.add_virt_resource(self._conn, diskdrive,
- instance)
- if new_resources is None:
- raise vmutils.HyperVException(_('Failed to add volume to VM %s') %
- instance)
+ def _get_free_controller_slot(self, scsi_controller_path):
+        #Slots start from 0, so the number of attached disks gives the free slot
+ return self._vmutils.get_attached_disks_count(scsi_controller_path)
- def _get_free_controller_slot(self, scsi_controller):
- #Getting volumes mounted in the SCSI controller
- volumes = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'\
- AND Parent = '" + scsi_controller.path_() + "'")
- #Slots starts from 0, so the length of the disks gives us the free slot
- return len(volumes)
-
- def detach_volume(self, connection_info, instance_name, mountpoint):
+ def detach_volume(self, connection_info, instance_name):
"""Dettach a volume to the SCSI controller."""
- LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s,"
- " %(mountpoint)s") % locals())
+ LOG.debug(_("Detach_volume: %(connection_info)s "
+ "from %(instance_name)s") % locals())
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
#Getting the mounted disk
- mounted_disk = self._get_mounted_disk_from_lun(target_iqn, target_lun)
- physical_list = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Physical Disk Drive'")
- physical_disk = 0
- for phydisk in physical_list:
- host_resource_list = phydisk.HostResource
- if host_resource_list is None:
- continue
- host_resource = str(host_resource_list[0].lower())
- mounted_disk_path = str(mounted_disk[0].path_().lower())
- LOG.debug(_("Mounted disk to detach is: %s"), mounted_disk_path)
- LOG.debug(_("host_resource disk detached is: %s"), host_resource)
- if host_resource == mounted_disk_path:
- physical_disk = phydisk
- LOG.debug(_("Physical disk detached is: %s"), physical_disk)
- vms = self._conn.MSVM_ComputerSystem(ElementName=instance_name)
- vm = vms[0]
- remove_result = self._vmutils.remove_virt_resource(self._conn,
- physical_disk, vm)
- if remove_result is False:
- raise vmutils.HyperVException(
- _('Failed to remove volume from VM %s') %
- instance_name)
+ mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
+ target_lun)
+
+ LOG.debug(_("Detaching physical disk from instance: %s"),
+ mounted_disk_path)
+ self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
+
#Sending logout
self._volutils.logout_storage_target(target_iqn)
def get_volume_connector(self, instance):
if not self._initiator:
- self._initiator = self._get_iscsi_initiator()
+ self._initiator = self._volutils.get_iscsi_initiator()
if not self._initiator:
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
@@ -232,87 +163,35 @@ class VolumeOps(baseops.BaseOps):
'initiator': self._initiator,
}
- def _get_iscsi_initiator(self):
- return self._volutils.get_iscsi_initiator(self._conn_cimv2)
-
def _get_mounted_disk_from_lun(self, target_iqn, target_lun):
- initiator_session = self._conn_wmi.query(
- "SELECT * FROM MSiSCSIInitiator_SessionClass \
- WHERE TargetName='" + target_iqn + "'")[0]
- devices = initiator_session.Devices
- device_number = None
- for device in devices:
- LOG.debug(_("device.InitiatorName: %s"), device.InitiatorName)
- LOG.debug(_("device.TargetName: %s"), device.TargetName)
- LOG.debug(_("device.ScsiPortNumber: %s"), device.ScsiPortNumber)
- LOG.debug(_("device.ScsiPathId: %s"), device.ScsiPathId)
- LOG.debug(_("device.ScsiTargetId): %s"), device.ScsiTargetId)
- LOG.debug(_("device.ScsiLun: %s"), device.ScsiLun)
- LOG.debug(_("device.DeviceInterfaceGuid :%s"),
- device.DeviceInterfaceGuid)
- LOG.debug(_("device.DeviceInterfaceName: %s"),
- device.DeviceInterfaceName)
- LOG.debug(_("device.LegacyName: %s"), device.LegacyName)
- LOG.debug(_("device.DeviceType: %s"), device.DeviceType)
- LOG.debug(_("device.DeviceNumber %s"), device.DeviceNumber)
- LOG.debug(_("device.PartitionNumber :%s"), device.PartitionNumber)
- scsi_lun = device.ScsiLun
- if scsi_lun == target_lun:
- device_number = device.DeviceNumber
+ device_number = self._volutils.get_device_number_for_target(target_iqn,
+ target_lun)
if device_number is None:
- raise vmutils.HyperVException(
- _('Unable to find a mounted disk for'
- ' target_iqn: %s') % target_iqn)
- LOG.debug(_("Device number : %s"), device_number)
- LOG.debug(_("Target lun : %s"), target_lun)
+ raise vmutils.HyperVException(_('Unable to find a mounted '
+ 'disk for target_iqn: %s')
+ % target_iqn)
+ LOG.debug(_('Device number: %(device_number)s, '
+ 'target lun: %(target_lun)s') % locals())
#Finding Mounted disk drive
- for i in range(1, self._attaching_volume_retry_count):
- mounted_disk = self._conn.query(
- "SELECT * FROM Msvm_DiskDrive WHERE DriveNumber=" +
- str(device_number) + "")
- LOG.debug(_("Mounted disk is: %s"), mounted_disk)
- if len(mounted_disk) > 0:
+ for i in range(1, CONF.hyperv_attaching_volume_retry_count):
+ mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
+ device_number)
+ if mounted_disk_path:
break
- self._time.sleep(self._wait_between_attach_retry)
- mounted_disk = self._conn.query(
- "SELECT * FROM Msvm_DiskDrive WHERE DriveNumber=" +
- str(device_number) + "")
- LOG.debug(_("Mounted disk is: %s"), mounted_disk)
- if len(mounted_disk) == 0:
- raise vmutils.HyperVException(
- _('Unable to find a mounted disk for'
- ' target_iqn: %s') % target_iqn)
- return mounted_disk
+ time.sleep(CONF.hyperv_wait_between_attach_retry)
+
+ if not mounted_disk_path:
+ raise vmutils.HyperVException(_('Unable to find a mounted disk '
+ 'for target_iqn: %s')
+ % target_iqn)
+ return mounted_disk_path
def disconnect_volume(self, physical_drive_path):
#Get the session_id of the ISCSI connection
- session_id = self._get_session_id_from_mounted_disk(
+ session_id = self._volutils.get_session_id_from_mounted_disk(
physical_drive_path)
#Logging out the target
self._volutils.execute_log_out(session_id)
- def _get_session_id_from_mounted_disk(self, physical_drive_path):
- drive_number = self._get_drive_number_from_disk_path(
- physical_drive_path)
- LOG.debug(_("Drive number to disconnect is: %s"), drive_number)
- initiator_sessions = self._conn_wmi.query(
- "SELECT * FROM MSiSCSIInitiator_SessionClass")
- for initiator_session in initiator_sessions:
- devices = initiator_session.Devices
- for device in devices:
- deviceNumber = str(device.DeviceNumber)
- LOG.debug(_("DeviceNumber : %s"), deviceNumber)
- if deviceNumber == drive_number:
- return initiator_session.SessionId
-
- def _get_drive_number_from_disk_path(self, disk_path):
- LOG.debug(_("Disk path to parse: %s"), disk_path)
- start_device_id = disk_path.find('"', disk_path.find('DeviceID'))
- LOG.debug(_("start_device_id: %s"), start_device_id)
- end_device_id = disk_path.find('"', start_device_id + 1)
- LOG.debug(_("end_device_id: %s"), end_device_id)
- deviceID = disk_path[start_device_id + 1:end_device_id]
- return deviceID[deviceID.find("\\") + 2:]
-
def get_default_root_device(self):
return self._default_root_device
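For reference, a hedged sketch of the connection_info structure that the
refactored attach_volume()/detach_volume() above consume, inferred from the
fields the code reads; all values are invented:

    connection_info = {
        'driver_volume_type': 'iscsi',
        'data': {
            'target_lun': 1,
            'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',
            'target_portal': '192.168.1.10:3260',
        },
    }

    ops = VolumeOps()
    ops.attach_volume(connection_info, 'instance-00000001')
    ops.detach_volume(connection_info, 'instance-00000001')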
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 051c37fd6..713ace258 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Pedro Navarro Perez
+# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -35,47 +36,48 @@ CONF = cfg.CONF
class VolumeUtils(basevolumeutils.BaseVolumeUtils):
- def __init__(self, conn_wmi):
- self._conn_wmi = conn_wmi
+ def __init__(self):
+ super(VolumeUtils, self).__init__()
- def execute(self, *args, **kwargs):
- _PIPE = subprocess.PIPE # pylint: disable=E1101
- proc = subprocess.Popen(
- [args],
- stdin=_PIPE,
- stdout=_PIPE,
- stderr=_PIPE,
- )
- stdout_value, stderr_value = proc.communicate()
- if stdout_value.find('The operation completed successfully') == -1:
- raise vmutils.HyperVException(_('An error has occurred when '
- 'calling the iscsi initiator: %s') % stdout_value)
+ def execute(self, *args, **kwargs):
+ _PIPE = subprocess.PIPE # pylint: disable=E1101
+ proc = subprocess.Popen(
+ [args],
+ stdin=_PIPE,
+ stdout=_PIPE,
+ stderr=_PIPE,
+ )
+ stdout_value, stderr_value = proc.communicate()
+ if stdout_value.find('The operation completed successfully') == -1:
+ raise vmutils.HyperVException(_('An error has occurred when '
+ 'calling the iscsi initiator: %s')
+ % stdout_value)
- def login_storage_target(self, target_lun, target_iqn, target_portal):
- """Add target portal, list targets and logins to the target."""
- separator = target_portal.find(':')
- target_address = target_portal[:separator]
- target_port = target_portal[separator + 1:]
- #Adding target portal to iscsi initiator. Sending targets
- self.execute('iscsicli.exe ' + 'AddTargetPortal ' +
- target_address + ' ' + target_port +
- ' * * * * * * * * * * * * *')
- #Listing targets
- self.execute('iscsicli.exe ' + 'LisTargets')
- #Sending login
- self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
- #Waiting the disk to be mounted. Research this to avoid sleep
- time.sleep(CONF.hyperv_wait_between_attach_retry)
+ def login_storage_target(self, target_lun, target_iqn, target_portal):
+ """Add target portal, list targets and logins to the target."""
+ separator = target_portal.find(':')
+ target_address = target_portal[:separator]
+ target_port = target_portal[separator + 1:]
+ #Adding target portal to iscsi initiator. Sending targets
+ self.execute('iscsicli.exe ' + 'AddTargetPortal ' +
+ target_address + ' ' + target_port +
+ ' * * * * * * * * * * * * *')
+ #Listing targets
+ self.execute('iscsicli.exe ' + 'LisTargets')
+ #Sending login
+ self.execute('iscsicli.exe ' + 'qlogintarget ' + target_iqn)
+        #Wait for the disk to be mounted. Research how to avoid this sleep
+ time.sleep(CONF.hyperv_wait_between_attach_retry)
- def logout_storage_target(self, target_iqn):
- """Logs out storage target through its session id."""
+ def logout_storage_target(self, target_iqn):
+ """Logs out storage target through its session id."""
- sessions = self._conn_wmi.query(
- "SELECT * FROM MSiSCSIInitiator_SessionClass \
- WHERE TargetName='" + target_iqn + "'")
- for session in sessions:
- self.execute_log_out(session.SessionId)
+ sessions = self._conn_wmi.query("SELECT * FROM "
+ "MSiSCSIInitiator_SessionClass "
+ "WHERE TargetName='%s'" % target_iqn)
+ for session in sessions:
+ self.execute_log_out(session.SessionId)
- def execute_log_out(self, session_id):
- """Executes log out of the session described by its session ID."""
- self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
+ def execute_log_out(self, session_id):
+ """Executes log out of the session described by its session ID."""
+ self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
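The v1 login path above shells out to iscsicli.exe. A short illustration of
how the portal string is split and which commands result (example values,
not from the patch):

    target_portal = '192.168.1.10:3260'
    separator = target_portal.find(':')
    target_address = target_portal[:separator]   # '192.168.1.10'
    target_port = target_portal[separator + 1:]  # '3260'
    # Resulting command sequence:
    #   iscsicli.exe AddTargetPortal 192.168.1.10 3260 * * * * * * * * * * * * *
    #   iscsicli.exe LisTargets
    #   iscsicli.exe qlogintarget iqn.2010-10.org.openstack:volume-0001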
diff --git a/nova/virt/hyperv/volumeutilsV2.py b/nova/virt/hyperv/volumeutilsV2.py
deleted file mode 100644
index 6f5bcdac9..000000000
--- a/nova/virt/hyperv/volumeutilsV2.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2012 Pedro Navarro Perez
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Helper methods for operations related to the management of volumes,
-and storage repositories for Windows 2012
-"""
-import time
-
-from nova.openstack.common import cfg
-from nova.openstack.common import log as logging
-from nova.virt.hyperv import basevolumeutils
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
-
- def __init__(self, conn_storage, conn_wmi):
- self._conn_storage = conn_storage
- self._conn_wmi = conn_wmi
-
- def login_storage_target(self, target_lun, target_iqn,
- target_portal):
- """Add target portal, list targets and logins to the target."""
- separator = target_portal.find(':')
- target_address = target_portal[:separator]
- target_port = target_portal[separator + 1:]
- #Adding target portal to iscsi initiator. Sending targets
- portal = self._conn_storage.__getattr__("MSFT_iSCSITargetPortal")
- portal.New(TargetPortalAddress=target_address,
- TargetPortalPortNumber=target_port)
- #Connecting to the target
- target = self._conn_storage.__getattr__("MSFT_iSCSITarget")
- target.Connect(NodeAddress=target_iqn,
- IsPersistent=True)
- #Waiting the disk to be mounted. Research this
- time.sleep(CONF.hyperv_wait_between_attach_retry)
-
- def logout_storage_target(self, target_iqn):
- """Logs out storage target through its session id."""
-
- target = self._conn_storage.MSFT_iSCSITarget(
- NodeAddress=target_iqn)[0]
- if target.IsConnected:
- session = self._conn_storage.MSFT_iSCSISession(
- TargetNodeAddress=target_iqn)[0]
- if session.IsPersistent:
- session.Unregister()
- target.Disconnect()
-
- def execute_log_out(self, session_id):
- session = self._conn_wmi.MSiSCSIInitiator_SessionClass(
- SessionId=session_id)[0]
- self.logout_storage_target(session.TargetName)
diff --git a/nova/virt/hyperv/volumeutilsv2.py b/nova/virt/hyperv/volumeutilsv2.py
new file mode 100644
index 000000000..8322d31d3
--- /dev/null
+++ b/nova/virt/hyperv/volumeutilsv2.py
@@ -0,0 +1,75 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2012 Pedro Navarro Perez
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of volumes
+and storage repositories on Windows Server 2012 and above
+"""
+import sys
+import time
+
+if sys.platform == 'win32':
+ import wmi
+
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova.virt.hyperv import basevolumeutils
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
+ def __init__(self):
+ super(VolumeUtilsV2, self).__init__()
+
+ storage_namespace = '//./root/microsoft/windows/storage'
+ if sys.platform == 'win32':
+ self._conn_storage = wmi.WMI(moniker=storage_namespace)
+
+ def login_storage_target(self, target_lun, target_iqn, target_portal):
+ """Add target portal, list targets and logins to the target."""
+ separator = target_portal.find(':')
+ target_address = target_portal[:separator]
+ target_port = target_portal[separator + 1:]
+ #Adding target portal to iscsi initiator. Sending targets
+ portal = self._conn_storage.MSFT_iSCSITargetPortal
+ portal.New(TargetPortalAddress=target_address,
+ TargetPortalPortNumber=target_port)
+ #Connecting to the target
+ target = self._conn_storage.MSFT_iSCSITarget
+ target.Connect(NodeAddress=target_iqn,
+ IsPersistent=True)
+        #Wait for the disk to be mounted. Research how to avoid this sleep
+ time.sleep(CONF.hyperv_wait_between_attach_retry)
+
+ def logout_storage_target(self, target_iqn):
+ """Logs out storage target through its session id."""
+
+ target = self._conn_storage.MSFT_iSCSITarget(NodeAddress=target_iqn)[0]
+ if target.IsConnected:
+ session = self._conn_storage.MSFT_iSCSISession(
+ TargetNodeAddress=target_iqn)[0]
+ if session.IsPersistent:
+ session.Unregister()
+ target.Disconnect()
+
+ def execute_log_out(self, session_id):
+ session = self._conn_wmi.MSiSCSIInitiator_SessionClass(
+ SessionId=session_id)[0]
+ self.logout_storage_target(session.TargetName)
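VolumeUtilsV2 relies on the MSFT_iSCSI* WMI classes in the
root/microsoft/windows/storage namespace, which only exist on Windows Server
2012 (NT 6.2) and later. A minimal sketch of the selection logic from
VolumeOps._get_volume_utils() earlier in this patch:

    def pick_volume_utils(hostutils, force_v1=False):
        # Fall back to the iscsicli.exe based implementation on older hosts
        # or when explicitly forced via the force_volumeutils_v1 option.
        if not force_v1 and hostutils.get_windows_version() >= 6.2:
            return volumeutilsv2.VolumeUtilsV2()
        return volumeutils.VolumeUtils()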
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
new file mode 100644
index 000000000..fc4a7dbfb
--- /dev/null
+++ b/nova/virt/libvirt/blockinfo.py
@@ -0,0 +1,416 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (C) 2012-2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handling of block device information and mapping.
+
+This module contains helper methods for interpreting the block
+device information and determining the suitable mapping to
+guest devices and libvirt XML.
+
+Throughout these methods there are a number of standard
+variables / types used
+
+ * 'mapping': a dict containing the storage device mapping.
+
+ For the default disk types it will contain the following
+ keys & values:
+
+ 'disk' -> disk_info
+ 'disk.rescue' -> disk_info
+ 'disk.local' -> disk_info
+ 'disk.swap' -> disk_info
+ 'disk.config' -> disk_info
+
+   If any of the default disks are overridden by the block
+   device info mappings, the corresponding dict value will be None
+
+ For any ephemeral device there will also be a dict entry
+
+ 'disk.eph$NUM' -> disk_info
+
+ For any volume device there will also be a dict entry:
+
+ $path -> disk_info
+
+ Finally a special key will refer to the root device:
+
+ 'root' -> disk_info
+
+
+ * 'disk_info': a dict specifying disk configuration
+
+   It contains the following 3 keys
+
+      'bus' (disk bus), 'dev' (disk dev), 'type' (device type)
+
+ * 'disk_bus': the guest bus type ('ide', 'virtio', 'scsi', etc)
+
+ * 'disk_dev': the device name 'vda', 'hdc', 'sdf', 'xvde' etc
+
+ * 'device_type': type of device eg 'disk', 'cdrom', 'floppy'
+
+"""
+
+from nova import block_device
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.virt import configdrive
+from nova.virt import driver
+
+
+LOG = logging.getLogger(__name__)
+
+
+def has_disk_dev(mapping, disk_dev):
+ """Determine if a disk device name has already been used.
+
+ Looks at all the keys in mapping to see if any
+    corresponding disk_info dict has a device name
+ matching disk_dev
+
+ Returns True if the disk_dev is in use."""
+
+ for disk in mapping:
+ info = mapping[disk]
+ if info['dev'] == disk_dev:
+ return True
+ return False
+
+
+def get_dev_prefix_for_disk_bus(disk_bus):
+ """Determine the dev prefix for a disk bus.
+
+ Determine the dev prefix to be combined
+    with a disk number to form a disk_dev.
+ eg 'hd' for 'ide' bus can be used to
+ form a disk dev 'hda'
+
+ Returns the dev prefix or raises an
+ exception if the disk bus is unknown."""
+
+ if disk_bus == "ide":
+ return "hd"
+ elif disk_bus == "virtio":
+ return "vd"
+ elif disk_bus == "xen":
+ # Two possible mappings for Xen, xvda or sda
+        # which are interchangeable, so we pick sda
+ return "sd"
+ elif disk_bus == "scsi":
+ return "sd"
+ elif disk_bus == "usb":
+ return "sd"
+ elif disk_bus == "uml":
+ return "ubd"
+ else:
+ raise exception.NovaException(
+ _("Unable to determine disk prefix for %s") %
+ disk_bus)
+
+
+def get_dev_count_for_disk_bus(disk_bus):
+    """Determine the number of disks supported.
+
+ Determine how many disks can be supported in
+ a single VM for a particular disk bus.
+
+ Returns the number of disks supported."""
+
+ if disk_bus == "ide":
+ return 4
+ else:
+ return 26
+
+
+def find_disk_dev_for_disk_bus(mapping, bus, last_device=False):
+ """Identify a free disk dev name for a bus.
+
+ Determines the possible disk dev names for
+ the bus, and then checks them in order until
+ it identifies one that is not yet used in the
+ disk mapping. If 'last_device' is set, it will
+ only consider the last available disk dev name.
+
+ Returns the chosen disk_dev name, or raises an
+ exception if none is available.
+ """
+
+ dev_prefix = get_dev_prefix_for_disk_bus(bus)
+ max_dev = get_dev_count_for_disk_bus(bus)
+ if last_device:
+ devs = [max_dev - 1]
+ else:
+ devs = range(max_dev)
+
+ for idx in devs:
+ disk_dev = dev_prefix + chr(ord('a') + idx)
+ if not has_disk_dev(mapping, disk_dev):
+ return disk_dev
+
+    raise exception.NovaException(
+        _("No free disk device names for prefix '%s'") %
+        dev_prefix)
+
+
+def is_disk_bus_valid_for_virt(virt_type, disk_bus):
+ valid_bus = {
+ 'qemu': ['virtio', 'scsi', 'ide', 'usb'],
+ 'kvm': ['virtio', 'scsi', 'ide', 'usb'],
+ 'xen': ['xen', 'ide'],
+ 'uml': ['uml'],
+ }
+
+ if virt_type not in valid_bus:
+ raise exception.NovaException(
+ _("Unsupported virt type %s") % virt_type)
+
+ return disk_bus in valid_bus[virt_type]
+
+
+def get_disk_bus_for_device_type(virt_type,
+ image_meta=None,
+ device_type="disk"):
+ """Determine the best disk bus to use for a device type.
+
+ Considering the currently configured virtualization
+ type, return the optimal disk_bus to use for a given
+ device type. For example, for a disk on KVM it will
+ return 'virtio', while for a CDROM it will return 'ide'
+
+ Returns the disk_bus, or returns None if the device
+ type is not supported for this virtualization"""
+
+ # Prefer a disk bus set against the image first of all
+ if image_meta:
+ key = device_type + "_bus"
+ disk_bus = image_meta.get('properties', {}).get(key)
+ if disk_bus is not None:
+ if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
+ raise exception.NovaException(
+ _("Disk bus %(disk_bus)s is not valid for %(virt)s") %
+ {'disk_bus': disk_bus, 'virt': virt_type})
+ return disk_bus
+
+ # Otherwise pick a hypervisor default disk bus
+ if virt_type == "uml":
+ if device_type == "disk":
+ return "uml"
+ elif virt_type == "xen":
+ if device_type == "cdrom":
+ return "ide"
+ elif device_type == "disk":
+ return "xen"
+ elif virt_type in ("qemu", "kvm"):
+ if device_type == "cdrom":
+ return "ide"
+ elif device_type == "disk":
+ return "virtio"
+
+ return None
+
+
+def get_disk_bus_for_disk_dev(virt_type, disk_dev):
+ """Determine the disk bus for a disk dev.
+
+    Given a disk dev like 'hda', 'sdf', 'xvdb', etc
+ guess what the most appropriate disk bus is for
+ the currently configured virtualization technology
+
+ Returns the disk bus, or raises an Exception if
+ the disk dev prefix is unknown."""
+
+ if disk_dev[:2] == 'hd':
+ return "ide"
+ elif disk_dev[:2] == 'sd':
+ # Reverse mapping 'sd' is not reliable
+ # there are many possible mappings. So
+ # this picks the most likely mappings
+ if virt_type == "xen":
+ return "xen"
+ else:
+ return "scsi"
+ elif disk_dev[:2] == 'vd':
+ return "virtio"
+ elif disk_dev[:3] == 'xvd':
+ return "xen"
+ elif disk_dev[:3] == 'ubd':
+ return "uml"
+ else:
+ raise exception.NovaException(
+ _("Unable to determine disk bus for '%s'") %
+ disk_dev[:1])
+
+
+def get_next_disk_info(mapping, disk_bus,
+ device_type='disk',
+ last_device=False):
+ """Determine the disk info for the next device on disk_bus.
+
+ Considering the disks already listed in the disk mapping,
+ determine the next available disk dev that can be assigned
+ for the disk bus.
+
+ Returns the disk_info for the next available disk."""
+
+ disk_dev = find_disk_dev_for_disk_bus(mapping,
+ disk_bus,
+ last_device)
+ return {'bus': disk_bus,
+ 'dev': disk_dev,
+ 'type': device_type}
+
+
+def get_eph_disk(ephemeral):
+ return 'disk.eph' + str(ephemeral['num'])
+
+
+def get_disk_mapping(virt_type, instance,
+ disk_bus, cdrom_bus,
+ block_device_info=None,
+ image_meta=None, rescue=False):
+ """Determine how to map default disks to the virtual machine.
+
+ This is about figuring out whether the default 'disk',
+ 'disk.local', 'disk.swap' and 'disk.config' images have
+    been overridden by the block device mapping.
+
+ Returns the guest disk mapping for the devices."""
+
+ inst_type = instance['instance_type']
+
+ mapping = {}
+
+ if virt_type == "lxc":
+ return mapping
+
+ if rescue:
+ rescue_info = get_next_disk_info(mapping,
+ disk_bus)
+ mapping['disk.rescue'] = rescue_info
+ mapping['root'] = rescue_info
+
+ os_info = get_next_disk_info(mapping,
+ disk_bus)
+ mapping['disk'] = os_info
+
+ return mapping
+
+ if image_meta and image_meta.get('disk_format') == 'iso':
+ root_disk_bus = cdrom_bus
+ root_device_type = 'cdrom'
+ else:
+ root_disk_bus = disk_bus
+ root_device_type = 'disk'
+
+ root_device_name = driver.block_device_info_get_root(block_device_info)
+ if root_device_name is not None:
+ root_device = block_device.strip_dev(root_device_name)
+ root_info = {'bus': get_disk_bus_for_disk_dev(virt_type,
+ root_device),
+ 'dev': root_device,
+ 'type': root_device_type}
+ else:
+ root_info = get_next_disk_info(mapping,
+ root_disk_bus,
+ root_device_type)
+ mapping['root'] = root_info
+ if not block_device.volume_in_mapping(root_info['dev'],
+ block_device_info):
+ mapping['disk'] = root_info
+
+ eph_info = get_next_disk_info(mapping,
+ disk_bus)
+ ephemeral_device = False
+ if not (block_device.volume_in_mapping(eph_info['dev'],
+ block_device_info) or
+ 0 in [eph['num'] for eph in
+ driver.block_device_info_get_ephemerals(
+ block_device_info)]):
+ if instance['ephemeral_gb'] > 0:
+ ephemeral_device = True
+
+ if ephemeral_device:
+ mapping['disk.local'] = eph_info
+
+ for eph in driver.block_device_info_get_ephemerals(
+ block_device_info):
+ disk_dev = block_device.strip_dev(eph['device_name'])
+ disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
+
+ mapping[get_eph_disk(eph)] = {'bus': disk_bus,
+ 'dev': disk_dev,
+ 'type': 'disk'}
+
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ disk_dev = block_device.strip_dev(swap['device_name'])
+ disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
+
+ mapping['disk.swap'] = {'bus': disk_bus,
+ 'dev': disk_dev,
+ 'type': 'disk'}
+ elif inst_type['swap'] > 0:
+ swap_info = get_next_disk_info(mapping,
+ disk_bus)
+ if not block_device.volume_in_mapping(swap_info['dev'],
+ block_device_info):
+ mapping['disk.swap'] = swap_info
+
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
+
+ for vol in block_device_mapping:
+ disk_dev = vol['mount_device'].rpartition("/")[2]
+ disk_bus = get_disk_bus_for_disk_dev(virt_type, disk_dev)
+
+ mapping[vol['mount_device']] = {'bus': disk_bus,
+ 'dev': disk_dev,
+ 'type': 'disk'}
+
+ if configdrive.enabled_for(instance):
+ config_info = get_next_disk_info(mapping,
+ disk_bus,
+ last_device=True)
+ mapping['disk.config'] = config_info
+
+ return mapping
+
+
+def get_disk_info(virt_type, instance, block_device_info=None,
+ image_meta=None, rescue=False):
+ """Determine guest disk mapping info.
+
+ This is a wrapper around get_disk_mapping, which
+ also returns the chosen disk_bus and cdrom_bus.
+ The returned data is in a dict
+
+ - disk_bus: the bus for harddisks
+ - cdrom_bus: the bus for CDROMs
+ - mapping: the disk mapping
+
+    Returns the disk mapping dict."""
+
+ disk_bus = get_disk_bus_for_device_type(virt_type, image_meta, "disk")
+ cdrom_bus = get_disk_bus_for_device_type(virt_type, image_meta, "cdrom")
+ mapping = get_disk_mapping(virt_type, instance,
+ disk_bus, cdrom_bus,
+ block_device_info,
+ image_meta, rescue)
+
+ return {'disk_bus': disk_bus,
+ 'cdrom_bus': cdrom_bus,
+ 'mapping': mapping}
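A hedged usage sketch for the new blockinfo module; the instance dict below
is a placeholder carrying only the fields get_disk_mapping() reads, and the
result shown is indicative:

    from nova.virt.libvirt import blockinfo

    instance = {'instance_type': {'swap': 0},
                'ephemeral_gb': 10,
                'kernel_id': None,
                'image_ref': 'fake-image'}

    disk_info = blockinfo.get_disk_info('kvm', instance)
    # => {'disk_bus': 'virtio',
    #     'cdrom_bus': 'ide',
    #     'mapping': {'root': ..., 'disk': ..., 'disk.local': ...}}
    # (plus 'disk.config' when a config drive is enabled for the instance)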
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index ed5b21c79..d6ef3fca9 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -549,6 +549,7 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
self.mac_addr = None
self.script = None
self.source_dev = None
+ self.source_mode = "private"
self.vporttype = None
self.vportparams = []
self.filtername = None
@@ -571,7 +572,7 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
dev.append(etree.Element("script", path=self.script))
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
- mode="private"))
+ mode=self.source_mode))
else:
dev.append(etree.Element("source", bridge=self.source_dev))
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
index b832db4fa..176eeef4c 100644
--- a/nova/virt/libvirt/designer.py
+++ b/nova/virt/libvirt/designer.py
@@ -70,6 +70,38 @@ def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None):
conf.script = ""
+def set_vif_host_backend_802qbg_config(conf, devname, managerid,
+ typeid, typeidversion,
+ instanceid, tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an 802.1qbg device"""
+
+ conf.net_type = "direct"
+ conf.source_dev = devname
+ conf.source_mode = "vepa"
+ conf.vporttype = "802.1Qbg"
+ conf.add_vport_param("managerid", managerid)
+ conf.add_vport_param("typeid", typeid)
+ conf.add_vport_param("typeidversion", typeidversion)
+ conf.add_vport_param("instanceid", instanceid)
+ if tapname:
+ conf.target_dev = tapname
+
+
+def set_vif_host_backend_802qbh_config(conf, devname, profileid,
+ tapname=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with host backend details for an 802.1qbh device"""
+
+ conf.net_type = "direct"
+ conf.source_dev = devname
+ conf.source_mode = "vepa"
+ conf.vporttype = "802.1Qbh"
+ conf.add_vport_param("profileid", profileid)
+ if tapname:
+ conf.target_dev = tapname
+
+
def set_vif_host_backend_filter_config(conf, name,
primary_addr,
dhcp_server=None,
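A sketch of how the new 802.1qbg helper might be driven, together with the
libvirt XML it is intended to produce via LibvirtConfigGuestInterface
(imported as vconfig in driver.py); all parameter values are invented and
the XML shape is approximate:

    conf = vconfig.LibvirtConfigGuestInterface()
    set_vif_host_backend_802qbg_config(
        conf, 'eth0', managerid='1', typeid='2', typeidversion='3',
        instanceid='09b11c53-1111-2222-3333-444444444444')
    # Approximate resulting element:
    #   <interface type="direct">
    #     <source dev="eth0" mode="vepa"/>
    #     <virtualport type="802.1Qbg">
    #       <parameters managerid="1" typeid="2" typeidversion="3"
    #                   instanceid="09b11c53-1111-2222-3333-444444444444"/>
    #     </virtualport>
    #   </interface>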
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 46b63d1c6..7e34aef3d 100644..100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -47,6 +47,7 @@ import os
import shutil
import sys
import tempfile
+import time
import uuid
from eventlet import greenthread
@@ -75,6 +76,7 @@ from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import firewall
+from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
@@ -114,8 +116,8 @@ libvirt_opts = [
cfg.IntOpt('libvirt_inject_partition',
default=1,
help='The partition to inject to : '
- '-1 => inspect (libguestfs only), 0 => not partitioned, '
- '>0 => partition number'),
+ '-2 => disable, -1 => inspect (libguestfs only), '
+ '0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
@@ -140,7 +142,7 @@ libvirt_opts = [
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
- default='nova.virt.libvirt.vif.LibvirtBridgeDriver',
+ default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
@@ -149,7 +151,7 @@ libvirt_opts = [
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
- 'nfs=nova.virt.libvirt.volume_nfs.NfsVolumeDriver'
+ 'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver'
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
@@ -254,10 +256,10 @@ MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
-
-
-def _get_eph_disk(ephemeral):
- return 'disk.eph' + str(ephemeral['num'])
+# Live snapshot requirements
+REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
+MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
+MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
class LibvirtDriver(driver.ComputeDriver):
@@ -283,23 +285,15 @@ class LibvirtDriver(driver.ComputeDriver):
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
get_connection=self._get_connection)
- self.vif_driver = importutils.import_object(CONF.libvirt_vif_driver)
+
+ vif_class = importutils.import_class(CONF.libvirt_vif_driver)
+ self.vif_driver = vif_class(self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt_volume_drivers, self)
self._host_state = None
- disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
- if CONF.libvirt_disk_prefix:
- self._disk_prefix = CONF.libvirt_disk_prefix
- else:
- self._disk_prefix = disk_prefix_map.get(CONF.libvirt_type, 'vd')
- self.default_root_device = self._disk_prefix + 'a'
- self.default_second_device = self._disk_prefix + 'b'
- self.default_third_device = self._disk_prefix + 'c'
- self.default_last_device = self._disk_prefix + 'z'
-
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
@@ -325,16 +319,29 @@ class LibvirtDriver(driver.ComputeDriver):
self._host_state = HostState(self.virtapi, self.read_only)
return self._host_state
- def has_min_version(self, ver):
- libvirt_version = self._conn.getLibVersion()
-
+ def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
def _munge_version(ver):
return ver[0] * 1000000 + ver[1] * 1000 + ver[2]
- if libvirt_version < _munge_version(ver):
- return False
+ try:
+ if lv_ver is not None:
+ libvirt_version = self._conn.getLibVersion()
+ if libvirt_version < _munge_version(lv_ver):
+ return False
- return True
+ if hv_ver is not None:
+ hypervisor_version = self._conn.getVersion()
+ if hypervisor_version < _munge_version(hv_ver):
+ return False
+
+ if hv_type is not None:
+ hypervisor_type = self._conn.getType()
+ if hypervisor_type != hv_type:
+ return False
+
+ return True
+ except Exception:
+ return False
def init_host(self, host):
if not self.has_min_version(MIN_LIBVIRT_VERSION):
@@ -581,10 +588,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
- mount_device = vol['mount_device'].rpartition("/")[2]
+ disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
- mount_device)
+ disk_dev)
if destroy_disks:
target = libvirt_utils.get_instance_path(instance)
@@ -655,7 +662,7 @@ class LibvirtDriver(driver.ComputeDriver):
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
- if not driver_type in self.volume_drivers:
+ if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
@@ -664,10 +671,16 @@ class LibvirtDriver(driver.ComputeDriver):
def attach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
- mount_device = mountpoint.rpartition("/")[2]
+ disk_dev = mountpoint.rpartition("/")[2]
+ disk_info = {
+ 'dev': disk_dev,
+ 'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
+ disk_dev),
+ 'type': 'disk',
+ }
conf = self.volume_driver_method('connect_volume',
connection_info,
- mount_device)
+ disk_info)
try:
# NOTE(vish): We can always affect config because our
@@ -683,14 +696,14 @@ class LibvirtDriver(driver.ComputeDriver):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
- connection_info,
- mount_device)
- raise exception.DeviceIsBusy(device=mount_device)
+ connection_info,
+ disk_dev)
+ raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
- connection_info,
- mount_device)
+ connection_info,
+ disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
@@ -712,18 +725,21 @@ class LibvirtDriver(driver.ComputeDriver):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
- xml = self.to_xml(instance, network_info,
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance,
+ block_device_info)
+ xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint):
instance_name = instance['name']
- mount_device = mountpoint.rpartition("/")[2]
+ disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._lookup_by_name(instance_name)
- xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
+ xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
- raise exception.DiskNotFound(location=mount_device)
+ raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
@@ -746,7 +762,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.volume_driver_method('disconnect_volume',
connection_info,
- mount_device)
+ disk_dev)
def snapshot(self, context, instance, image_href, update_task_state):
"""Create snapshot from a running VM instance.
@@ -806,35 +822,67 @@ class LibvirtDriver(driver.ComputeDriver):
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
+ # NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
+ # These restrictions can be relaxed as other configurations
+ # can be validated.
+ if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
+ MIN_QEMU_LIVESNAPSHOT_VERSION,
+ REQ_HYPERVISOR_LIVESNAPSHOT) \
+ and not source_format == "lvm":
+ live_snapshot = True
+ else:
+ live_snapshot = False
+
+ # NOTE(rmk): We cannot perform live snapshots when a managedSave
+ # file is present, so we will use the cold/legacy method
+ # for instances which are shutdown.
+ if state == power_state.SHUTDOWN:
+ live_snapshot = False
+
# NOTE(dkang): managedSave does not work for LXC
- if CONF.libvirt_type != 'lxc':
+ if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
virt_dom.managedSave(0)
- # Make the snapshot
- snapshot = self.image_backend.snapshot(disk_path, snapshot_name,
- image_type=source_format)
+ snapshot_backend = self.image_backend.snapshot(disk_path,
+ snapshot_name,
+ image_type=source_format)
- snapshot.create()
- update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
+ if live_snapshot:
+ LOG.info(_("Beginning live snapshot process"),
+ instance=instance)
+ else:
+ LOG.info(_("Beginning cold snapshot process"),
+ instance=instance)
+ snapshot_backend.snapshot_create()
- # Export the snapshot to a raw image
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt_snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
- snapshot.extract(out_path, image_format)
+ if live_snapshot:
+ # NOTE (rmk): libvirt needs to be able to write to the
+                    #             temp directory, which is owned by nova.
+ utils.execute('chmod', '777', tmpdir, run_as_root=True)
+ self._live_snapshot(virt_dom, disk_path, out_path,
+ image_format)
+ else:
+ snapshot_backend.snapshot_extract(out_path, image_format)
finally:
- snapshot.delete()
+ if not live_snapshot:
+ snapshot_backend.snapshot_delete()
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
- if CONF.libvirt_type != 'lxc':
+ if CONF.libvirt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
+ LOG.info(_("Snapshot extracted, beginning image upload"),
+ instance=instance)
# Upload that image to the image service
@@ -845,8 +893,74 @@ class LibvirtDriver(driver.ComputeDriver):
image_href,
metadata,
image_file)
+ LOG.info(_("Snapshot image upload complete"),
+ instance=instance)
+
+ def _live_snapshot(self, domain, disk_path, out_path, image_format):
+ """Snapshot an instance without downtime."""
+ # Save a copy of the domain's running XML file
+ xml = domain.XMLDesc(0)
+
+ # Abort is an idempotent operation, so make sure any block
+ # jobs which may have failed are ended.
+ try:
+ domain.blockJobAbort(disk_path, 0)
+ except Exception:
+ pass
- def reboot(self, instance, network_info, reboot_type='SOFT',
+ def _wait_for_block_job(domain, disk_path):
+ status = domain.blockJobInfo(disk_path, 0)
+ try:
+ cur = status.get('cur', 0)
+ end = status.get('end', 0)
+ except Exception:
+ return False
+
+ if cur == end and cur != 0 and end != 0:
+ return False
+ else:
+ return True
+
+        # NOTE (rmk): We are using shallow rebases as a workaround for a bug
+ # in QEMU 1.3. In order to do this, we need to create
+ # a destination image with the original backing file
+ # and matching size of the instance root disk.
+ src_disk_size = libvirt_utils.get_disk_size(disk_path)
+ src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
+ basename=False)
+ disk_delta = out_path + '.delta'
+ libvirt_utils.create_cow_image(src_back_path, disk_delta,
+ src_disk_size)
+
+ try:
+ # NOTE (rmk): blockRebase cannot be executed on persistent
+ # domains, so we need to temporarily undefine it.
+ # If any part of this block fails, the domain is
+ # re-defined regardless.
+ if domain.isPersistent():
+ domain.undefine()
+
+ # NOTE (rmk): Establish a temporary mirror of our root disk and
+ # issue an abort once we have a complete copy.
+ domain.blockRebase(disk_path, disk_delta, 0,
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
+
+ while _wait_for_block_job(domain, disk_path):
+ time.sleep(0.5)
+
+ domain.blockJobAbort(disk_path, 0)
+ libvirt_utils.chown(disk_delta, os.getuid())
+ finally:
+ self._conn.defineXML(xml)
+
+ # Convert the delta (CoW) image with a backing file to a flat
+ # image with no backing file.
+ libvirt_utils.extract_snapshot(disk_delta, 'qcow2', None,
+ out_path, image_format)
+
+ def reboot(self, context, instance, network_info, reboot_type='SOFT',
block_device_info=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
@@ -858,7 +972,8 @@ class LibvirtDriver(driver.ComputeDriver):
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
- return self._hard_reboot(instance, network_info, block_device_info)
+ return self._hard_reboot(context, instance, network_info,
+ block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
@@ -904,7 +1019,8 @@ class LibvirtDriver(driver.ComputeDriver):
greenthread.sleep(1)
return False
- def _hard_reboot(self, instance, network_info, block_device_info=None):
+ def _hard_reboot(self, context, instance, network_info,
+ block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
@@ -918,9 +1034,19 @@ class LibvirtDriver(driver.ComputeDriver):
"""
self._destroy(instance)
- xml = self.to_xml(instance, network_info,
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance,
+ block_device_info)
+ xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
+
+ # NOTE (rmk): Re-populate any missing backing files.
+ disk_info_json = self.get_instance_disk_info(instance['name'], xml)
+ self._create_images_and_backing(context, instance, disk_info_json)
+
+ # Initialize all the necessary networking, block devices and
+ # start the instance.
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
@@ -989,7 +1115,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
- self._hard_reboot(instance, network_info, block_device_info)
+ self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
@@ -1011,9 +1137,16 @@ class LibvirtDriver(driver.ComputeDriver):
'kernel_id': CONF.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': CONF.rescue_ramdisk_id or instance['ramdisk_id'],
}
- xml = self.to_xml(instance, network_info, image_meta,
- rescue=rescue_images)
- self._create_image(context, instance, xml, '.rescue', rescue_images,
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance,
+ None,
+ image_meta,
+ rescue=True)
+ xml = self.to_xml(instance, network_info, disk_info,
+ image_meta, rescue=rescue_images)
+ self._create_image(context, instance, xml,
+ disk_info['mapping'],
+ '.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
self._destroy(instance)
@@ -1049,14 +1182,20 @@ class LibvirtDriver(driver.ComputeDriver):
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- xml = self.to_xml(instance, network_info, image_meta,
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance,
+ block_device_info,
+ image_meta)
+ xml = self.to_xml(instance, network_info,
+ disk_info, image_meta,
block_device_info=block_device_info)
if image_meta:
self._create_image(context, instance, xml,
- network_info=network_info,
- block_device_info=block_device_info,
- files=injected_files,
- admin_pass=admin_password)
+ disk_info['mapping'],
+ network_info=network_info,
+ block_device_info=block_device_info,
+ files=injected_files,
+ admin_pass=admin_password)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
@@ -1259,7 +1398,8 @@ class LibvirtDriver(driver.ComputeDriver):
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
- def _create_image(self, context, instance, libvirt_xml, suffix='',
+ def _create_image(self, context, instance, libvirt_xml,
+ disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None, admin_pass=None):
if not suffix:
@@ -1319,8 +1459,7 @@ class LibvirtDriver(driver.ComputeDriver):
if size == 0 or suffix == '.rescue':
size = None
- if not self._volume_in_mapping(self.default_root_device,
- block_device_info):
+ if 'disk' in disk_mapping:
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
@@ -1335,9 +1474,7 @@ class LibvirtDriver(driver.ComputeDriver):
os_type_with_default = 'default'
ephemeral_gb = instance['ephemeral_gb']
- if ephemeral_gb and not self._volume_in_mapping(
- self.default_second_device, block_device_info):
- swap_device = self.default_third_device
+ if 'disk.local' in disk_mapping:
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
@@ -1347,8 +1484,6 @@ class LibvirtDriver(driver.ComputeDriver):
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
- else:
- swap_device = self.default_second_device
for eph in driver.block_device_info_get_ephemerals(block_device_info):
fn = functools.partial(self._create_ephemeral,
@@ -1356,48 +1491,29 @@ class LibvirtDriver(driver.ComputeDriver):
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default)
- image(_get_eph_disk(eph)).cache(fetch_func=fn,
- filename=fname,
- size=size,
- ephemeral_size=eph['size'])
-
- swap_mb = 0
-
- swap = driver.block_device_info_get_swap(block_device_info)
- if driver.swap_is_usable(swap):
- swap_mb = swap['swap_size']
- elif (inst_type['swap'] > 0 and
- not self._volume_in_mapping(swap_device, block_device_info)):
- swap_mb = inst_type['swap']
-
- if swap_mb > 0:
- size = swap_mb * 1024 * 1024
- image('disk.swap').cache(fetch_func=self._create_swap,
- filename="swap_%s" % swap_mb,
- size=size,
- swap_mb=swap_mb)
-
- # target partition for file injection
- target_partition = None
- if not instance['kernel_id']:
- target_partition = CONF.libvirt_inject_partition
- if target_partition == 0:
- target_partition = None
- if CONF.libvirt_type == 'lxc':
- target_partition = None
-
- if CONF.libvirt_inject_key and instance['key_data']:
- key = str(instance['key_data'])
- else:
- key = None
-
- # File injection
- metadata = instance.get('metadata')
-
- if not CONF.libvirt_inject_password:
- admin_pass = None
-
- net = netutils.get_injected_network_template(network_info)
+ image(blockinfo.get_eph_disk(eph)).cache(
+ fetch_func=fn,
+ filename=fname,
+ size=size,
+ ephemeral_size=eph['size'])
+
+ if 'disk.swap' in disk_mapping:
+ swap_mb = 0
+
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ swap_mb = swap['swap_size']
+ elif (inst_type['swap'] > 0 and
+ not block_device.volume_in_mapping(
+ disk_mapping['disk.swap']['dev'], block_device_info)):
+ swap_mb = inst_type['swap']
+
+ if swap_mb > 0:
+ size = swap_mb * 1024 * 1024
+ image('disk.swap').cache(fetch_func=self._create_swap,
+ filename="swap_%s" % swap_mb,
+ size=size,
+ swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
@@ -1420,48 +1536,52 @@ class LibvirtDriver(driver.ComputeDriver):
e, instance=instance)
raise
- elif any((key, net, metadata, admin_pass, files)):
- # If we're not using config_drive, inject into root fs
- injection_path = image('disk').path
- img_id = instance['image_ref']
+ # File injection
+ elif CONF.libvirt_inject_partition != -2:
+ target_partition = None
+ if not instance['kernel_id']:
+ target_partition = CONF.libvirt_inject_partition
+ if target_partition == 0:
+ target_partition = None
+ if CONF.libvirt_type == 'lxc':
+ target_partition = None
- for inject in ('key', 'net', 'metadata', 'admin_pass', 'files'):
- if locals()[inject]:
- LOG.info(_('Injecting %(inject)s into image'
- ' %(img_id)s'), locals(), instance=instance)
- try:
- disk.inject_data(injection_path,
- key, net, metadata, admin_pass, files,
- partition=target_partition,
- use_cow=CONF.use_cow_images,
- mandatory=('files',))
- except Exception as e:
- LOG.error(_('Error injecting data into image '
- '%(img_id)s (%(e)s)') % locals(),
- instance=instance)
- raise
+ if CONF.libvirt_inject_key and instance['key_data']:
+ key = str(instance['key_data'])
+ else:
+ key = None
+
+ net = netutils.get_injected_network_template(network_info)
+
+ metadata = instance.get('metadata')
+
+ if not CONF.libvirt_inject_password:
+ admin_pass = None
+
+ if any((key, net, metadata, admin_pass, files)):
+ # If we're not using config_drive, inject into root fs
+ injection_path = image('disk').path
+ img_id = instance['image_ref']
+
+ for inj in ('key', 'net', 'metadata', 'admin_pass', 'files'):
+ if locals()[inj]:
+ LOG.info(_('Injecting %(inj)s into image '
+ '%(img_id)s'), locals(), instance=instance)
+ try:
+ disk.inject_data(injection_path,
+ key, net, metadata, admin_pass, files,
+ partition=target_partition,
+ use_cow=CONF.use_cow_images,
+ mandatory=('files',))
+ except Exception as e:
+ LOG.error(_('Error injecting data into image '
+ '%(img_id)s (%(e)s)') % locals(),
+ instance=instance)
+ raise
if CONF.libvirt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
- @staticmethod
- def _volume_in_mapping(mount_device, block_device_info):
- block_device_list = [block_device.strip_dev(vol['mount_device'])
- for vol in
- driver.block_device_info_get_mapping(
- block_device_info)]
- swap = driver.block_device_info_get_swap(block_device_info)
- if driver.swap_is_usable(swap):
- block_device_list.append(
- block_device.strip_dev(swap['device_name']))
- block_device_list += [block_device.strip_dev(ephemeral['device_name'])
- for ephemeral in
- driver.block_device_info_get_ephemerals(
- block_device_info)]
-
- LOG.debug(_("block_device_list %s"), block_device_list)
- return block_device.strip_dev(mount_device) in block_device_list
-
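The private _volume_in_mapping() helper deleted here moves to nova.block_device as volume_in_mapping(), which the swap branch above already calls. A minimal self-contained sketch of the check, assuming the shared helper kept the removed logic and the usual block_device_info keys ('block_device_mapping', 'swap', 'ephemerals'):

def strip_dev(device_name):
    # '/dev/vdb' -> 'vdb'
    return device_name[5:] if device_name.startswith('/dev/') else device_name

def volume_in_mapping(mount_device, block_device_info):
    bdi = block_device_info or {}
    claimed = [strip_dev(vol['mount_device'])
               for vol in bdi.get('block_device_mapping', [])]
    swap = bdi.get('swap')
    if swap and swap.get('swap_size', 0) > 0:
        claimed.append(strip_dev(swap['device_name']))
    claimed += [strip_dev(eph['device_name'])
                for eph in bdi.get('ephemerals', [])]
    return strip_dev(mount_device) in claimed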
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host"""
@@ -1548,11 +1668,23 @@ class LibvirtDriver(driver.ComputeDriver):
return cpu
+ def get_guest_disk_config(self, instance, name, disk_mapping,
+ image_type=None):
+ image = self.image_backend.image(instance,
+ name,
+ image_type)
+ disk_info = disk_mapping[name]
+ return image.libvirt_info(disk_info['bus'],
+ disk_info['dev'],
+ disk_info['type'],
+ self.disk_cachemode)
+
def get_guest_storage_config(self, instance, image_meta,
+ disk_info,
rescue, block_device_info,
- inst_type,
- root_device_name, root_device):
+ inst_type):
devices = []
+ disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
@@ -1564,112 +1696,64 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
else:
- if image_meta and image_meta.get('disk_format') == 'iso':
- root_device_type = 'cdrom'
- root_device = 'hda'
- else:
- root_device_type = 'disk'
-
- if CONF.libvirt_type == "uml":
- default_disk_bus = "uml"
- elif CONF.libvirt_type == "xen":
- default_disk_bus = "xen"
- else:
- default_disk_bus = "virtio"
-
- def disk_info(name, disk_dev, disk_bus=default_disk_bus,
- device_type="disk"):
- image = self.image_backend.image(instance, name)
- return image.libvirt_info(disk_bus,
- disk_dev,
- device_type,
- self.disk_cachemode)
if rescue:
- diskrescue = disk_info('disk.rescue',
- self.default_root_device,
- device_type=root_device_type)
+ diskrescue = self.get_guest_disk_config(instance,
+ 'disk.rescue',
+ disk_mapping)
devices.append(diskrescue)
- diskos = disk_info('disk',
- self.default_second_device)
+ diskos = self.get_guest_disk_config(instance,
+ 'disk',
+ disk_mapping)
devices.append(diskos)
else:
- ebs_root = self._volume_in_mapping(self.default_root_device,
- block_device_info)
-
- if not ebs_root:
- if root_device_type == "cdrom":
- bus = "ide"
- else:
- bus = default_disk_bus
- diskos = disk_info('disk',
- root_device,
- bus,
- root_device_type)
+ if 'disk' in disk_mapping:
+ diskos = self.get_guest_disk_config(instance,
+ 'disk',
+ disk_mapping)
devices.append(diskos)
- ephemeral_device = None
- if not (self._volume_in_mapping(self.default_second_device,
- block_device_info) or
- 0 in [eph['num'] for eph in
- driver.block_device_info_get_ephemerals(
- block_device_info)]):
- if instance['ephemeral_gb'] > 0:
- ephemeral_device = self.default_second_device
-
- if ephemeral_device is not None:
- disklocal = disk_info('disk.local', ephemeral_device)
+ if 'disk.local' in disk_mapping:
+ disklocal = self.get_guest_disk_config(instance,
+ 'disk.local',
+ disk_mapping)
devices.append(disklocal)
-
- if ephemeral_device is not None:
- swap_device = self.default_third_device
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
- '/dev/' + self.default_second_device})
- else:
- swap_device = self.default_second_device
+ '/dev/' + disklocal.target_dev})
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
- diskeph = disk_info(_get_eph_disk(eph),
- block_device.strip_dev(
- eph['device_name']))
+ diskeph = self.get_guest_disk_config(
+ instance,
+ blockinfo.get_eph_disk(eph),
+ disk_mapping)
devices.append(diskeph)
- swap = driver.block_device_info_get_swap(block_device_info)
- if driver.swap_is_usable(swap):
- diskswap = disk_info('disk.swap',
- block_device.strip_dev(
- swap['device_name']))
- devices.append(diskswap)
- elif (inst_type['swap'] > 0 and
- not self._volume_in_mapping(swap_device,
- block_device_info)):
- diskswap = disk_info('disk.swap', swap_device)
+ if 'disk.swap' in disk_mapping:
+ diskswap = self.get_guest_disk_config(instance,
+ 'disk.swap',
+ disk_mapping)
devices.append(diskswap)
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
- {'default_swap_device': '/dev/' + swap_device})
+ {'default_swap_device': '/dev/' + diskswap.target_dev})
for vol in block_device_mapping:
connection_info = vol['connection_info']
- mount_device = vol['mount_device'].rpartition("/")[2]
+ info = disk_mapping[vol['mount_device']]
cfg = self.volume_driver_method('connect_volume',
connection_info,
- mount_device)
+ info)
devices.append(cfg)
- if configdrive.enabled_for(instance):
- diskconfig = vconfig.LibvirtConfigGuestDisk()
- diskconfig.source_type = "file"
- diskconfig.driver_format = "raw"
- diskconfig.driver_cache = self.disk_cachemode
- diskconfig.source_path = os.path.join(
- libvirt_utils.get_instance_path(instance), "disk.config")
- diskconfig.target_dev = self.default_last_device
- diskconfig.target_bus = default_disk_bus
+ if 'disk.config' in disk_mapping:
+ diskconfig = self.get_guest_disk_config(instance,
+ 'disk.config',
+ disk_mapping,
+ 'raw')
devices.append(diskconfig)
return devices
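With the table-driven mapping, get_guest_storage_config() no longer reasons about default buses; each entry already carries its bus, device node and device type, and get_guest_disk_config() merely forwards them to the image backend. A sketch of the triple one mapping entry turns into (field names mirror the libvirt disk config used elsewhere in this diff):

def libvirt_disk_fields(entry, source_path, cachemode='none'):
    # entry comes from disk_info['mapping'], e.g. the sketch after
    # spawn() above; this mirrors what image.libvirt_info() receives.
    return {'target_bus': entry['bus'],    # 'virtio', 'ide', 'xen', ...
            'target_dev': entry['dev'],    # 'vda', 'vdb', ...
            'device_type': entry['type'],  # 'disk' or 'cdrom'
            'driver_cache': cachemode,
            'source_path': source_path}

print(libvirt_disk_fields({'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
                          '/var/lib/nova/instances/instance-00000001/disk.local'))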
@@ -1686,8 +1770,8 @@ class LibvirtDriver(driver.ComputeDriver):
return sysinfo
- def get_guest_config(self, instance, network_info, image_meta, rescue=None,
- block_device_info=None):
+ def get_guest_config(self, instance, network_info, image_meta,
+ disk_info, rescue=None, block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
@@ -1696,6 +1780,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
inst_type = instance['instance_type']
inst_path = libvirt_utils.get_instance_path(instance)
+ disk_mapping = disk_info['mapping']
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = CONF.libvirt_type
@@ -1706,16 +1791,17 @@ class LibvirtDriver(driver.ComputeDriver):
guest.cpu = self.get_guest_cpu_config()
- root_device_name = driver.block_device_info_get_root(block_device_info)
- if root_device_name:
- root_device = block_device.strip_dev(root_device_name)
+ if 'root' in disk_mapping:
+ root_device_name = "/dev/" + disk_mapping['root']['dev']
else:
+ root_device_name = None
+
+ if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
- root_device = self.default_root_device
self.virtapi.instance_update(
nova_context.get_admin_context(), instance['uuid'],
- {'root_device_name': '/dev/' + self.default_root_device})
+ {'root_device_name': root_device_name})
guest.os_type = vm_mode.get_from_instance(instance)
@@ -1745,10 +1831,10 @@ class LibvirtDriver(driver.ComputeDriver):
elif CONF.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
- guest.os_root = root_device_name or "/dev/ubda"
+ guest.os_root = root_device_name
else:
if CONF.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
- guest.os_root = root_device_name or "/dev/xvda"
+ guest.os_root = root_device_name
else:
guest.os_type = vm_mode.HVM
@@ -1759,7 +1845,7 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
- (root_device_name or "/dev/vda",))
+ root_device_name)
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
@@ -1769,7 +1855,7 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_cmdline = "ro"
else:
guest.os_cmdline = ("root=%s console=ttyS0" %
- (root_device_name or "/dev/vda",))
+ root_device_name)
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
else:
@@ -1799,17 +1885,15 @@ class LibvirtDriver(driver.ComputeDriver):
for cfg in self.get_guest_storage_config(instance,
image_meta,
+ disk_info,
rescue,
block_device_info,
- inst_type,
- root_device_name,
- root_device):
+ inst_type):
guest.add_device(cfg)
for (network, mapping) in network_info:
- self.vif_driver.plug(instance, (network, mapping))
cfg = self.vif_driver.get_config(instance,
- network, mapping)
+ network, mapping)
guest.add_device(cfg)
if CONF.libvirt_type == "qemu" or CONF.libvirt_type == "kvm":
@@ -1875,11 +1959,17 @@ class LibvirtDriver(driver.ComputeDriver):
return guest
- def to_xml(self, instance, network_info, image_meta=None, rescue=None,
+ def to_xml(self, instance, network_info, disk_info,
+ image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
- LOG.debug(_('Starting toXML method'), instance=instance)
+ LOG.debug(_("Start to_xml instance=%(instance)s "
+ "network_info=%(network_info)s "
+ "disk_info=%(disk_info)s "
+ "image_meta=%(image_meta)s rescue=%(rescue)s"
+ "block_device_info=%(block_device_info)s") %
+ locals())
conf = self.get_guest_config(instance, network_info, image_meta,
- rescue, block_device_info)
+ disk_info, rescue, block_device_info)
xml = conf.to_xml()
if write_to_disk:
@@ -1888,7 +1978,7 @@ class LibvirtDriver(driver.ComputeDriver):
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
- LOG.debug(_('Finished toXML method'), instance=instance)
+ LOG.debug(_('End to_xml instance=%(instance)s xml=%(xml)s') % locals())
return xml
def _lookup_by_name(self, instance_name):
@@ -1956,8 +2046,12 @@ class LibvirtDriver(driver.ComputeDriver):
# namespace and so there is no need to keep the container rootfs
# mounted in the host namespace
if CONF.libvirt_type == 'lxc':
+ state = self.get_info(instance)['state']
container_dir = os.path.join(inst_path, 'rootfs')
- disk.teardown_container(container_dir=container_dir)
+ if state == power_state.RUNNING:
+ disk.clean_lxc_namespace(container_dir=container_dir)
+ else:
+ disk.teardown_container(container_dir=container_dir)
return domain
@@ -1970,10 +2064,16 @@ class LibvirtDriver(driver.ComputeDriver):
for vol in block_device_mapping:
connection_info = vol['connection_info']
- mount_device = vol['mount_device'].rpartition("/")[2]
+ disk_dev = vol['mount_device'].rpartition("/")[2]
+ disk_info = {
+ 'dev': disk_dev,
+ 'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
+ disk_dev),
+ 'type': 'disk',
+ }
self.volume_driver_method('connect_volume',
connection_info,
- mount_device)
+ disk_info)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
@@ -2102,18 +2202,15 @@ class LibvirtDriver(driver.ComputeDriver):
"""
total = 0
+ if CONF.libvirt_type == 'lxc':
+ return total + 1
+
dom_ids = self.list_instance_ids()
for dom_id in dom_ids:
try:
dom = self._conn.lookupByID(dom_id)
vcpus = dom.vcpus()
- if vcpus is None:
- # dom.vcpus is not implemented for lxc, but returning 0 for
- # a used count is hardly useful for something measuring
- # usage
- total += 1
- else:
- total += len(vcpus[1])
+ total += len(vcpus[1])
except libvirt.libvirtError as err:
if err.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug(_("List of domains returned by libVirt: %s")
@@ -2715,10 +2812,16 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
- mount_device = vol['mount_device'].rpartition("/")[2]
+ disk_dev = vol['mount_device'].rpartition("/")[2]
+ disk_info = {
+ 'dev': disk_dev,
+ 'bus': blockinfo.get_disk_bus_for_disk_dev(CONF.libvirt_type,
+ disk_dev),
+ 'type': 'disk',
+ }
self.volume_driver_method('connect_volume',
connection_info,
- mount_device)
+ disk_info)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
@@ -2739,8 +2842,17 @@ class LibvirtDriver(driver.ComputeDriver):
greenthread.sleep(1)
def pre_block_migration(self, ctxt, instance, disk_info_json):
- """Preparation block migration.
+ """Preparation for block migration."""
+ # NOTE (rmk): When preparing for a block migration, the instance dir
+ # should not exist on the destination hypervisor.
+ instance_dir = libvirt_utils.get_instance_path(instance)
+ if os.path.exists(instance_dir):
+ raise exception.DestinationDiskExists(path=instance_dir)
+ os.mkdir(instance_dir)
+ self._create_images_and_backing(ctxt, instance, disk_info_json)
+ def _create_images_and_backing(self, ctxt, instance, disk_info_json):
+ """
:params ctxt: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
@@ -2750,19 +2862,14 @@ class LibvirtDriver(driver.ComputeDriver):
"""
disk_info = jsonutils.loads(disk_info_json)
-
- # make instance directory
instance_dir = libvirt_utils.get_instance_path(instance)
- if os.path.exists(instance_dir):
- raise exception.DestinationDiskExists(path=instance_dir)
- os.mkdir(instance_dir)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
- if not info['backing_file']:
+ if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['disk_size'])
else:
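Because _create_images_and_backing() is now shared between pre_block_migration() and hard reboot, only the destination-directory check stays migration-specific, and the per-disk work must be idempotent. A sketch of the guard, with hypothetical callables standing in for libvirt_utils and the cache fetch:

import os

def ensure_disk(instance_dir, info, create_image, fetch_backing):
    # Mirrors the loop above: disks without a backing file are created
    # empty unless already present; qcow2 disks get their backing file
    # restored into the image cache instead.
    instance_disk = os.path.join(instance_dir, os.path.basename(info['path']))
    if not info['backing_file'] and not os.path.exists(instance_disk):
        create_image(info['type'], instance_disk, info['disk_size'])
    else:
        fetch_backing(info['backing_file'])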
@@ -2805,18 +2912,19 @@ class LibvirtDriver(driver.ComputeDriver):
if instance_ref["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
- self.to_xml(instance_ref, network_info, block_device_info,
- write_to_disk=True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance_ref)
+ self.to_xml(instance_ref, network_info, disk_info,
+ block_device_info, write_to_disk=True)
# libvirt.xml should be made by to_xml(), but libvirt
# does not accept the to_xml() result, since the uuid is
# not included in it.
dom = self._lookup_by_name(instance_ref["name"])
self._conn.defineXML(dom.XMLDesc(0))
- def get_instance_disk_info(self, instance_name):
+ def get_instance_disk_info(self, instance_name, xml=None):
"""Preparation block migration.
- :params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
@@ -2829,18 +2937,22 @@ class LibvirtDriver(driver.ComputeDriver):
'disk_size':'83886080'},...]"
"""
- disk_info = []
+ # NOTE (rmk): Passing the domain XML into this function is optional.
+ # When it is not passed, we attempt to extract it from
+ # the pre-existing definition.
+ if xml is None:
+ try:
+ virt_dom = self._lookup_by_name(instance_name)
+ xml = virt_dom.XMLDesc(0)
+ except libvirt.libvirtError as ex:
+ error_code = ex.get_error_code()
+ msg = _("Error from libvirt while getting description of "
+ "%(instance_name)s: [Error Code %(error_code)s] "
+ "%(ex)s") % locals()
+ LOG.warn(msg)
+ raise exception.InstanceNotFound(instance_id=instance_name)
- virt_dom = self._lookup_by_name(instance_name)
- try:
- xml = virt_dom.XMLDesc(0)
- except libvirt.libvirtError as ex:
- error_code = ex.get_error_code()
- msg = _("Error from libvirt while getting description of "
- "%(instance_name)s: [Error Code %(error_code)s] "
- "%(ex)s") % locals()
- LOG.warn(msg)
- raise exception.InstanceNotFound(instance_id=instance_name)
+ disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
@@ -2959,10 +3071,10 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
- mount_device = vol['mount_device'].rpartition("/")[2]
+ disk_dev = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
- mount_device)
+ disk_dev)
# copy disks to destination
# rename instance dir to +_resize at first for using
@@ -3049,13 +3161,18 @@ class LibvirtDriver(driver.ComputeDriver):
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
- xml = self.to_xml(instance, network_info,
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance,
+ block_device_info,
+ image_meta)
+ xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
# assume _create_image do nothing if a target file exists.
# TODO(oda): injecting files is not necessary
self._create_image(context, instance, xml,
- network_info=network_info,
- block_device_info=None)
+ disk_mapping=disk_info['mapping'],
+ network_info=network_info,
+ block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
@@ -3071,7 +3188,10 @@ class LibvirtDriver(driver.ComputeDriver):
inst_base_resize = inst_base + "_resize"
utils.execute('mv', inst_base_resize, inst_base)
- xml = self.to_xml(instance, network_info,
+ disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
+ instance,
+ block_device_info)
+ xml = self.to_xml(instance, network_info, disk_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 3323b8f1d..c9a1b1fbb 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -29,11 +29,7 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
-try:
- import libvirt
-except ImportError:
- LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will "
- "not work correctly."))
+libvirt = None
class NWFilterFirewall(base_firewall.FirewallDriver):
@@ -47,6 +43,13 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
def __init__(self, virtapi, get_connection, **kwargs):
super(NWFilterFirewall, self).__init__(virtapi)
+ global libvirt
+ if libvirt is None:
+ try:
+ libvirt = __import__('libvirt')
+ except ImportError:
+ LOG.warn(_("Libvirt module could not be loaded. "
+ "NWFilterFirewall will not work correctly."))
self._libvirt_get_connection = get_connection
self.static_filters_configured = False
self.handle_security_groups = False
@@ -117,18 +120,31 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if mapping['dhcp_server']:
allow_dhcp = True
break
+
+ base_filter = self.get_base_filter_list(instance, allow_dhcp)
+
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ self._define_filter(self._filter_container(instance_filter_name,
+ base_filter))
+
+ def get_base_filter_list(self, instance, allow_dhcp):
+ """
+ Obtain a list of base filters to apply to an instance.
+ The return value should be a list of strings, each
+ specifying a filter name. Subclasses can override this
+ function to add additional filters as needed. Additional
+ filters added to the list must also be correctly defined
+ within the subclass.
+ """
if pipelib.is_vpn_image(instance['image_ref']):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
else:
base_filter = 'nova-nodhcp'
-
- for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- self._define_filter(self._filter_container(instance_filter_name,
- [base_filter]))
+ return [base_filter]
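The new hook lets deployments layer extra filters without copying the per-NIC plumbing. A hypothetical subclass (the 'nova-custom-drop' name is invented; as the docstring says, the subclass must also define it):

class SiteFilterFirewall(NWFilterFirewall):
    def get_base_filter_list(self, instance, allow_dhcp):
        filters = super(SiteFilterFirewall,
                        self).get_base_filter_list(instance, allow_dhcp)
        # Hypothetical site-specific filter, defined elsewhere in the
        # subclass via _define_filter().
        filters.append('nova-custom-drop')
        return filters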
def _ensure_static_filters(self):
"""Static filters are filters that have no need to be IP aware.
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 0815c142f..74148a866 100644..100755
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -25,8 +25,8 @@ from nova.openstack.common import fileutils
from nova.openstack.common import lockutils
from nova import utils
from nova.virt.disk import api as disk
+from nova.virt import images
from nova.virt.libvirt import config as vconfig
-from nova.virt.libvirt import snapshots
from nova.virt.libvirt import utils as libvirt_utils
__imagebackend_opts = [
@@ -120,31 +120,34 @@ class Image(object):
if not os.path.exists(target):
fetch_func(target=target, *args, **kwargs)
- if not os.path.exists(self.path):
- base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
- if not os.path.exists(base_dir):
- fileutils.ensure_tree(base_dir)
- base = os.path.join(base_dir, filename)
+ base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
+ if not os.path.exists(base_dir):
+ fileutils.ensure_tree(base_dir)
+ base = os.path.join(base_dir, filename)
+ if not os.path.exists(self.path) or not os.path.exists(base):
self.create_image(call_if_not_exists, base, size,
*args, **kwargs)
- @abc.abstractmethod
- def snapshot(self, name):
- """Create snapshot object for this image
+ def snapshot_create(self):
+ raise NotImplementedError
- :name: snapshot name
- """
- pass
+ def snapshot_extract(self, target, out_format):
+ raise NotImplementedError
+
+ def snapshot_delete(self):
+ raise NotImplementedError
class Raw(Image):
- def __init__(self, instance=None, name=None, path=None):
+ def __init__(self, instance=None, disk_name=None, path=None,
+ snapshot_name=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
self.path = (path or
os.path.join(libvirt_utils.get_instance_path(instance),
- name))
+ disk_name))
+ self.snapshot_name = snapshot_name
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -160,20 +163,29 @@ class Raw(Image):
prepare_template(target=self.path, *args, **kwargs)
else:
prepare_template(target=base, *args, **kwargs)
- with utils.remove_path_on_error(self.path):
- copy_raw_image(base, self.path, size)
+ if not os.path.exists(self.path):
+ with utils.remove_path_on_error(self.path):
+ copy_raw_image(base, self.path, size)
+
+ def snapshot_create(self):
+ pass
- def snapshot(self, name):
- return snapshots.RawSnapshot(self.path, name)
+ def snapshot_extract(self, target, out_format):
+ images.convert_image(self.path, target, out_format)
+
+ def snapshot_delete(self):
+ pass
class Qcow2(Image):
- def __init__(self, instance=None, name=None, path=None):
+ def __init__(self, instance=None, disk_name=None, path=None,
+ snapshot_name=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
self.path = (path or
os.path.join(libvirt_utils.get_instance_path(instance),
- name))
+ disk_name))
+ self.snapshot_name = snapshot_name
def create_image(self, prepare_template, base, size, *args, **kwargs):
@lockutils.synchronized(base, 'nova-', external=True,
@@ -183,12 +195,22 @@ class Qcow2(Image):
if size:
disk.extend(target, size)
- prepare_template(target=base, *args, **kwargs)
- with utils.remove_path_on_error(self.path):
- copy_qcow2_image(base, self.path, size)
+ if not os.path.exists(base):
+ prepare_template(target=base, *args, **kwargs)
+ if not os.path.exists(self.path):
+ with utils.remove_path_on_error(self.path):
+ copy_qcow2_image(base, self.path, size)
+
+ def snapshot_create(self):
+ libvirt_utils.create_snapshot(self.path, self.snapshot_name)
- def snapshot(self, name):
- return snapshots.Qcow2Snapshot(self.path, name)
+ def snapshot_extract(self, target, out_format):
+ libvirt_utils.extract_snapshot(self.path, 'qcow2',
+ self.snapshot_name, target,
+ out_format)
+
+ def snapshot_delete(self):
+ libvirt_utils.delete_snapshot(self.path, self.snapshot_name)
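Folding snapshot behaviour into the Image subclasses removes the parallel Snapshot hierarchy deleted from snapshots.py below. The resulting qcow2 round trip, sketched with illustrative paths (libvirt_utils wraps qemu-img snapshot/convert underneath):

img = Qcow2(path='/var/lib/nova/instances/instance-00000001/disk',
            snapshot_name='snap-20130101')
img.snapshot_create()                              # qemu-img snapshot -c
img.snapshot_extract('/tmp/snap.qcow2', 'qcow2')   # qemu-img convert -s
img.snapshot_delete()                              # qemu-img snapshot -d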
class Lvm(Image):
@@ -196,7 +218,8 @@ class Lvm(Image):
def escape(filename):
return filename.replace('_', '__')
- def __init__(self, instance=None, name=None, path=None):
+ def __init__(self, instance=None, disk_name=None, path=None,
+ snapshot_name=None):
super(Lvm, self).__init__("block", "raw", is_block_dev=True)
if path:
@@ -211,7 +234,7 @@ class Lvm(Image):
' flag to use LVM images.'))
self.vg = CONF.libvirt_images_volume_group
self.lv = '%s_%s' % (self.escape(instance['name']),
- self.escape(name))
+ self.escape(disk_name))
self.path = os.path.join('/dev', self.vg, self.lv)
self.sparse = CONF.libvirt_sparse_logical_volumes
@@ -251,9 +274,6 @@ class Lvm(Image):
with excutils.save_and_reraise_exception():
libvirt_utils.remove_logical_volumes(path)
- def snapshot(self, name):
- return snapshots.LvmSnapshot(self.path, name)
-
class Backend(object):
def __init__(self, use_cow):
@@ -272,7 +292,7 @@ class Backend(object):
raise RuntimeError(_('Unknown image_type=%s') % image_type)
return image
- def image(self, instance, name, image_type=None):
+ def image(self, instance, disk_name, image_type=None):
"""Constructs image for selected backend
:instance: Instance name.
@@ -281,9 +301,9 @@ class Backend(object):
Optional, is CONF.libvirt_images_type by default.
"""
backend = self.backend(image_type)
- return backend(instance=instance, name=name)
+ return backend(instance=instance, disk_name=disk_name)
- def snapshot(self, path, snapshot_name, image_type=None):
+ def snapshot(self, disk_path, snapshot_name, image_type=None):
"""Returns snapshot for given image
:disk_path: path to image
@@ -291,4 +311,4 @@ class Backend(object):
:image_type: type of image
"""
backend = self.backend(image_type)
- return backend(path=path).snapshot(snapshot_name)
+ return backend(path=disk_path, snapshot_name=snapshot_name)
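Callers keep going through Backend.snapshot(), but it now hands back an Image primed with the snapshot name instead of a dedicated Snapshot object. A hedged usage sketch:

backend = Backend(use_cow=True)
snap = backend.snapshot('/var/lib/nova/instances/instance-00000001/disk',
                        'snap-20130101', image_type='qcow2')
snap.snapshot_create()
snap.snapshot_extract('/tmp/snap.raw', 'raw')
snap.snapshot_delete()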
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 8f677b482..ea7bded95 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -305,7 +305,7 @@ class ImageCacheManager(object):
backing_path = os.path.join(CONF.instances_path,
CONF.base_dir_name,
backing_file)
- if not backing_path in inuse_images:
+ if backing_path not in inuse_images:
inuse_images.append(backing_path)
if backing_path in self.unexplained_images:
@@ -464,7 +464,7 @@ class ImageCacheManager(object):
# _verify_checksum returns True if the checksum is ok, and None if
# there is no checksum file
checksum_result = self._verify_checksum(img_id, base_file)
- if not checksum_result is None:
+ if checksum_result is not None:
image_bad = not checksum_result
# Give other threads a chance to run
@@ -555,7 +555,7 @@ class ImageCacheManager(object):
# Elements remaining in unexplained_images might be in use
inuse_backing_images = self._list_backing_images()
for backing_path in inuse_backing_images:
- if not backing_path in self.active_base_files:
+ if backing_path not in self.active_base_files:
self.active_base_files.append(backing_path)
# Anything left is an unknown base image
diff --git a/nova/virt/libvirt/snapshots.py b/nova/virt/libvirt/snapshots.py
deleted file mode 100644
index c85550eae..000000000
--- a/nova/virt/libvirt/snapshots.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 Grid Dynamics
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from nova.virt import images
-from nova.virt.libvirt import utils as libvirt_utils
-
-
-class Snapshot(object):
- @abc.abstractmethod
- def create(self):
- """Create new snapshot."""
- pass
-
- @abc.abstractmethod
- def extract(self, target, out_format):
- """Extract snapshot content to file
-
- :target: path to extraction
- :out_format: format of extraction (raw, qcow2, ...)
- """
- pass
-
- @abc.abstractmethod
- def delete(self):
- """Delete snapshot."""
- pass
-
-
-class RawSnapshot(object):
- def __init__(self, path, name):
- self.path = path
- self.name = name
-
- def create(self):
- pass
-
- def extract(self, target, out_format):
- images.convert_image(self.path, target, out_format)
-
- def delete(self):
- pass
-
-
-class Qcow2Snapshot(object):
- def __init__(self, path, name):
- self.path = path
- self.name = name
-
- def create(self):
- libvirt_utils.create_snapshot(self.path, self.name)
-
- def extract(self, target, out_format):
- libvirt_utils.extract_snapshot(self.path, 'qcow2',
- self.name, target,
- out_format)
-
- def delete(self):
- libvirt_utils.delete_snapshot(self.path, self.name)
-
-
-class LvmSnapshot(object):
- def __init__(self, path, name):
- self.path = path
- self.name = name
-
- def create(self):
- raise NotImplementedError(_("LVM snapshots not implemented"))
-
- def extract(self, target, out_format):
- raise NotImplementedError(_("LVM snapshots not implemented"))
-
- def delete(self):
- raise NotImplementedError(_("LVM snapshots not implemented"))
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 4b3517da7..b8e0cafec 100644..100755
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -29,7 +29,15 @@ from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images
+libvirt_opts = [
+ cfg.BoolOpt('libvirt_snapshot_compression',
+ default=False,
+ help='Compress snapshot images when possible. This '
+ 'currently applies exclusively to qcow2 images'),
+ ]
+
CONF = cfg.CONF
+CONF.register_opts(libvirt_opts)
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
@@ -63,7 +71,7 @@ def create_image(disk_format, path, size):
execute('qemu-img', 'create', '-f', disk_format, path, size)
-def create_cow_image(backing_file, path):
+def create_cow_image(backing_file, path, size=None):
"""Create COW image
Creates a COW image with the given backing file
@@ -89,6 +97,8 @@ def create_cow_image(backing_file, path):
# cow_opts += ['preallocation=%s' % base_details['preallocation']]
if base_details and base_details.encryption:
cow_opts += ['encryption=%s' % base_details.encryption]
+ if size is not None:
+ cow_opts += ['size=%s' % size]
if cow_opts:
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
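Accepting a size lets resize build a grown COW overlay in one step instead of a create plus a separate qemu-img resize. Roughly the options that result for a 10 GiB overlay (paths hypothetical):

size = 10 * 1024 ** 3
cow_opts = ['backing_file=/var/lib/nova/instances/_base/a1b2c3d4']
if size is not None:
    cow_opts += ['size=%s' % size]
# -> qemu-img create -f qcow2
#      -o backing_file=/var/lib/nova/instances/_base/a1b2c3d4,size=10737418240
#      /var/lib/nova/instances/instance-00000001/disk
print(",".join(cow_opts))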
@@ -227,6 +237,7 @@ def clear_logical_volume(path):
vol_size = logical_volume_size(path)
bs = 1024 * 1024
direct_flags = ('oflag=direct',)
+ sync_flags = ()
remaining_bytes = vol_size
# The loop caters for versions of dd that
@@ -238,11 +249,14 @@ def clear_logical_volume(path):
'if=/dev/zero', 'of=%s' % path,
'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
zero_cmd += direct_flags
+ zero_cmd += sync_flags
if zero_blocks:
utils.execute(*zero_cmd, run_as_root=True)
remaining_bytes %= bs
bs /= 1024 # Limit to 3 iterations
- direct_flags = () # Only use O_DIRECT with initial block size
+ # Use O_DIRECT with initial block size and fdatasync otherwise
+ direct_flags = ()
+ sync_flags = ('conv=fdatasync',)
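Only the first 1 MiB-block pass keeps O_DIRECT; the smaller follow-up passes now add conv=fdatasync so their writes are still flushed. A sketch reproducing the dd command progression (path hypothetical; the real loop also skips execution when a pass has a zero count):

def zeroing_commands(vol_size, path='/dev/vg/lv'):
    bs, remaining, flags, out = 1024 * 1024, vol_size, 'oflag=direct', []
    while remaining:
        out.append('dd bs=%d if=/dev/zero of=%s seek=%d count=%d %s'
                   % (bs, path, (vol_size - remaining) // bs,
                      remaining // bs, flags))
        remaining %= bs
        bs //= 1024
        flags = 'conv=fdatasync'
    return out

# zeroing_commands(3 * 1024 * 1024 + 5000) yields three passes:
# 3 blocks at 1 MiB (direct), 4 at 1 KiB, then 904 single bytes.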
def remove_logical_volumes(*paths):
@@ -273,7 +287,7 @@ def pick_disk_driver_name(is_block_dev=False):
if is_block_dev:
return "phy"
else:
- return "file"
+ return "tap"
elif CONF.libvirt_type in ('kvm', 'qemu'):
return "qemu"
else:
@@ -292,14 +306,14 @@ def get_disk_size(path):
return int(size)
-def get_disk_backing_file(path):
+def get_disk_backing_file(path, basename=True):
"""Get the backing file of a disk image
:param path: Path to the disk image
:returns: a path to the image's backing store
"""
backing_file = images.qemu_img_info(path).backing_file
- if backing_file:
+ if backing_file and basename:
backing_file = os.path.basename(backing_file)
return backing_file
@@ -403,16 +417,19 @@ def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
# NOTE(markmc): ISO is just raw to qemu-img
if dest_fmt == 'iso':
dest_fmt = 'raw'
- qemu_img_cmd = ('qemu-img',
- 'convert',
- '-f',
- source_fmt,
- '-O',
- dest_fmt,
- '-s',
- snapshot_name,
- disk_path,
- out_path)
+
+ qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt)
+
+ # Conditionally enable compression of snapshots.
+ if CONF.libvirt_snapshot_compression and dest_fmt == "qcow2":
+ qemu_img_cmd += ('-c',)
+
+ # When the snapshot name is omitted we do a basic convert,
+ # which is used by live snapshots.
+ if snapshot_name is not None:
+ qemu_img_cmd += ('-s', snapshot_name)
+
+ qemu_img_cmd += (disk_path, out_path)
execute(*qemu_img_cmd)
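With the option and the now-optional snapshot name, one helper serves both internal-snapshot extraction and the plain convert used by live snapshots. A sketch of the command construction (mirrors the code above; compression applies only to qcow2 output):

def qemu_img_convert_cmd(disk_path, out_path, source_fmt='qcow2',
                         dest_fmt='qcow2', snapshot_name=None,
                         compress=True):
    cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt)
    if compress and dest_fmt == 'qcow2':
        cmd += ('-c',)                  # CONF.libvirt_snapshot_compression
    if snapshot_name is not None:
        cmd += ('-s', snapshot_name)    # pull a named internal snapshot
    return cmd + (disk_path, out_path)

print(' '.join(qemu_img_convert_cmd('/inst/disk', '/out/snap',
                                    snapshot_name='snap1')))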
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index d90a5e295..0990f29b1 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -19,6 +19,8 @@
"""VIF drivers for libvirt."""
+import copy
+
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
@@ -28,7 +30,7 @@ from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
-from nova.virt import netutils
+
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
@@ -47,9 +49,26 @@ CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
+# Since libvirt 0.9.11, <interface type='bridge'>
+ # supports Open vSwitch natively.
+LIBVIRT_OVS_VPORT_VERSION = 9011
+
class LibvirtBaseVIFDriver(object):
+ def __init__(self, get_connection):
+ self.get_connection = get_connection
+ self.libvirt_version = None
+
+ def has_libvirt_version(self, want):
+ if self.libvirt_version is None:
+ conn = self.get_connection()
+ self.libvirt_version = conn.getLibVersion()
+
+ if self.libvirt_version >= want:
+ return True
+ return False
+
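The 9011 constant relies on getLibVersion()'s packed encoding, major * 1,000,000 + minor * 1,000 + micro, so libvirt 0.9.11 compares as 9011:

def libvirt_version_int(major, minor, micro):
    # Same packing libvirt uses for getLibVersion()/virGetVersion().
    return major * 1000000 + minor * 1000 + micro

assert libvirt_version_int(0, 9, 11) == 9011   # first release with OVS vports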
def get_vif_devname(self, mapping):
if 'vif_devname' in mapping:
return mapping['vif_devname']
@@ -72,19 +91,40 @@ class LibvirtBaseVIFDriver(object):
return conf
+ def plug(self, instance, vif):
+ pass
+
+ def unplug(self, instance, vif):
+ pass
+
-class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
- """VIF driver for Linux bridge."""
+class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
+ """Generic VIF driver for libvirt networking."""
def get_bridge_name(self, network):
return network['bridge']
- def get_config(self, instance, network, mapping):
- """Get VIF configurations for bridge type."""
+ def get_ovs_interfaceid(self, mapping):
+ return mapping['ovs_interfaceid']
- mac_id = mapping['mac'].replace(':', '')
+ def get_br_name(self, iface_id):
+ return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
+
+ def get_veth_pair_names(self, iface_id):
+ return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
+ ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
- conf = super(LibvirtBridgeDriver,
+ def get_firewall_required(self):
+ # TODO(berrange): Extend this to use information from VIF model
+ # which can indicate whether the network provider (eg Quantum)
+ # has already applied firewall filtering itself.
+ if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver":
+ return True
+ return False
+
+ def get_config_bridge(self, instance, network, mapping):
+ """Get VIF configurations for bridge type."""
+ conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
network,
mapping)
@@ -93,6 +133,7 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
conf, self.get_bridge_name(network),
self.get_vif_devname(mapping))
+ mac_id = mapping['mac'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
primary_addr = mapping['ips'][0]['ip']
dhcp_server = ra_server = ipv4_cidr = ipv6_cidr = None
@@ -106,14 +147,113 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
if CONF.use_ipv6:
ipv6_cidr = network['cidr_v6']
- designer.set_vif_host_backend_filter_config(
- conf, name, primary_addr, dhcp_server,
- ra_server, ipv4_cidr, ipv6_cidr)
+ if self.get_firewall_required():
+ designer.set_vif_host_backend_filter_config(
+ conf, name, primary_addr, dhcp_server,
+ ra_server, ipv4_cidr, ipv6_cidr)
return conf
- def plug(self, instance, vif):
+ def get_config_ovs_ethernet(self, instance, network, mapping):
+ conf = super(LibvirtGenericVIFDriver,
+ self).get_config(instance,
+ network,
+ mapping)
+
+ dev = self.get_vif_devname(mapping)
+ designer.set_vif_host_backend_ethernet_config(conf, dev)
+
+ return conf
+
+ def get_config_ovs_bridge(self, instance, network, mapping):
+ conf = super(LibvirtGenericVIFDriver,
+ self).get_config(instance,
+ network,
+ mapping)
+
+ designer.set_vif_host_backend_ovs_config(
+ conf, self.get_bridge_name(network),
+ self.get_ovs_interfaceid(mapping),
+ self.get_vif_devname(mapping))
+
+ return conf
+
+ def get_config_ovs_hybrid(self, instance, network, mapping):
+ newnet = copy.deepcopy(network)
+ newnet['bridge'] = self.get_br_name(mapping['vif_uuid'])
+ return self.get_config_bridge(instance,
+ newnet,
+ mapping)
+
+ def get_config_ovs(self, instance, network, mapping):
+ if self.get_firewall_required():
+ return self.get_config_ovs_hybrid(instance, network,
+ mapping)
+ elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
+ return self.get_config_ovs_bridge(instance, network,
+ mapping)
+ else:
+ return self.get_config_ovs_ethernet(instance, network,
+ mapping)
+
+ def get_config_802qbg(self, instance, network, mapping):
+ conf = super(LibvirtGenericVIFDriver,
+ self).get_config(instance,
+ network,
+ mapping)
+
+ params = mapping["qbg_params"]
+ designer.set_vif_host_backend_802qbg_config(
+ conf, network["interface"],
+ params['managerid'],
+ params['typeid'],
+ params['typeidversion'],
+ params['instanceid'])
+
+ return conf
+
+ def get_config_802qbh(self, instance, network, mapping):
+ conf = super(LibvirtGenericVIFDriver,
+ self).get_config(instance,
+ network,
+ mapping)
+
+ params = mapping["qbh_params"]
+ designer.set_vif_host_backend_802qbh_config(
+ conf, network["interface"],
+ params['profileid'])
+
+ return conf
+
+ def get_config(self, instance, network, mapping):
+ vif_type = mapping.get('vif_type')
+
+ LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
+ "network=%(network)s mapping=%(mapping)s")
+ % locals())
+
+ if vif_type is None:
+ raise exception.NovaException(
+ _("vif_type parameter must be present "
+ "for this vif_driver implementation"))
+
+ if vif_type == network_model.VIF_TYPE_BRIDGE:
+ return self.get_config_bridge(instance, network, mapping)
+ elif vif_type == network_model.VIF_TYPE_OVS:
+ return self.get_config_ovs(instance, network, mapping)
+ elif vif_type == network_model.VIF_TYPE_802_QBG:
+ return self.get_config_802qbg(instance, network, mapping)
+ elif vif_type == network_model.VIF_TYPE_802_QBH:
+ return self.get_config_802qbh(instance, network, mapping)
+ else:
+ raise exception.NovaException(
+ _("Unexpected vif_type=%s") % vif_type)
+
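A Quantum plugin that reports port binding only needs to put a vif_type into the mapping; the generic driver then routes to the matching config/plug/unplug trio. A hypothetical mapping as the dispatch expects it (all values invented):

mapping = {
    'vif_type': 'ovs',        # network_model.VIF_TYPE_OVS
    'vif_uuid': '6ab34d8e-1721-4f3b-9e1e-8a3c1b2d4e5f',
    'ovs_interfaceid': '6ab34d8e-1721-4f3b-9e1e-8a3c1b2d4e5f',
    'mac': 'fa:16:3e:11:22:33',
    'vif_devname': 'tap6ab34d8e-17',
}
# get_config(instance, network, mapping) routes to get_config_ovs(),
# which picks hybrid, native OVS bridge or ethernet depending on
# firewall requirements and the libvirt version.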
+ def plug_bridge(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
+
network, mapping = vif
if (not network.get('multi_host') and
mapping.get('should_create_bridge')):
@@ -135,37 +275,10 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
self.get_bridge_name(network),
iface)
- def unplug(self, instance, vif):
- """No manual unplugging required."""
- pass
-
-
-class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
- """VIF driver for Open vSwitch that uses libivrt type='ethernet'
+ def plug_ovs_ethernet(self, instance, vif):
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
- Used for libvirt versions that do not support
- OVS virtual port XML (0.9.10 or earlier).
- """
-
- def get_bridge_name(self, network):
- return network.get('bridge') or CONF.libvirt_ovs_bridge
-
- def get_ovs_interfaceid(self, mapping):
- return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
-
- def get_config(self, instance, network, mapping):
- dev = self.get_vif_devname(mapping)
-
- conf = super(LibvirtOpenVswitchDriver,
- self).get_config(instance,
- network,
- mapping)
-
- designer.set_vif_host_backend_ethernet_config(conf, dev)
-
- return conf
-
- def plug(self, instance, vif):
network, mapping = vif
iface_id = self.get_ovs_interfaceid(mapping)
dev = self.get_vif_devname(mapping)
@@ -174,54 +287,21 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
dev, iface_id, mapping['mac'],
instance['uuid'])
- def unplug(self, instance, vif):
- """Unplug the VIF by deleting the port from the bridge."""
- try:
- network, mapping = vif
- linux_net.delete_ovs_vif_port(self.get_bridge_name(network),
- self.get_vif_devname(mapping))
- except exception.ProcessExecutionError:
- LOG.exception(_("Failed while unplugging vif"), instance=instance)
-
-
-class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver):
- """VIF driver that uses OVS + Linux Bridge for iptables compatibility.
-
- Enables the use of OVS-based Quantum plugins while at the same
- time using iptables-based filtering, which requires that vifs be
- plugged into a linux bridge, not OVS. IPtables filtering is useful
- in particular for Nova security groups.
- """
+ def plug_ovs_bridge(self, instance, vif):
+ """No manual plugging required."""
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
- def get_br_name(self, iface_id):
- return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
-
- def get_veth_pair_names(self, iface_id):
- return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
- ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
-
- def get_bridge_name(self, network):
- return network.get('bridge') or CONF.libvirt_ovs_bridge
-
- def get_ovs_interfaceid(self, mapping):
- return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
-
- def get_config(self, instance, network, mapping):
- br_name = self.get_br_name(mapping['vif_uuid'])
- network['bridge'] = br_name
- return super(LibvirtHybridOVSBridgeDriver,
- self).get_config(instance,
- network,
- mapping)
-
- def plug(self, instance, vif):
+ def plug_ovs_hybrid(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
- VIF on the linux bridge using standard libvirt mechanisms
+ VIF on the linux bridge using standard libvirt mechanisms.
"""
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
network, mapping = vif
iface_id = self.get_ovs_interfaceid(mapping)
@@ -239,12 +319,78 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver):
v2_name, iface_id, mapping['mac'],
instance['uuid'])
- def unplug(self, instance, vif):
+ def plug_ovs(self, instance, vif):
+ if self.get_firewall_required():
+ self.plug_ovs_hybrid(instance, vif)
+ elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
+ self.plug_ovs_bridge(instance, vif)
+ else:
+ self.plug_ovs_ethernet(instance, vif)
+
+ def plug_802qbg(self, instance, vif):
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
+
+ def plug_802qbh(self, instance, vif):
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
+
+ def plug(self, instance, vif):
+ network, mapping = vif
+ vif_type = mapping.get('vif_type')
+
+ LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
+ "network=%(network)s mapping=%(mapping)s")
+ % locals())
+
+ if vif_type is None:
+ raise exception.NovaException(
+ _("vif_type parameter must be present "
+ "for this vif_driver implementation"))
+
+ if vif_type == network_model.VIF_TYPE_BRIDGE:
+ self.plug_bridge(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_OVS:
+ self.plug_ovs(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_802_QBG:
+ self.plug_802qbg(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_802_QBH:
+ self.plug_802qbh(instance, vif)
+ else:
+ raise exception.NovaException(
+ _("Unexpected vif_type=%s") % vif_type)
+
+ def unplug_bridge(self, instance, vif):
+ """No manual unplugging required."""
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ def unplug_ovs_ethernet(self, instance, vif):
+ """Unplug the VIF by deleting the port from the bridge."""
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ try:
+ network, mapping = vif
+ linux_net.delete_ovs_vif_port(self.get_bridge_name(network),
+ self.get_vif_devname(mapping))
+ except exception.ProcessExecutionError:
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
+
+ def unplug_ovs_bridge(self, instance, vif):
+ """No manual unplugging required."""
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ def unplug_ovs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
try:
network, mapping = vif
br_name = self.get_br_name(mapping['vif_uuid'])
@@ -260,10 +406,67 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver):
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
+ def unplug_ovs(self, instance, vif):
+ if self.get_firewall_required():
+ self.unplug_ovs_hybrid(instance, vif)
+ elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
+ self.unplug_ovs_bridge(instance, vif)
+ else:
+ self.unplug_ovs_ethernet(instance, vif)
+
+ def unplug_802qbg(self, instance, vif):
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ def unplug_802qbh(self, instance, vif):
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ def unplug(self, instance, vif):
+ network, mapping = vif
+ vif_type = mapping.get('vif_type')
+
+ LOG.debug(_("vif_type=%(vif_type)s instance=%(instance)s "
+ "network=%(network)s mapping=%(mapping)s")
+ % locals())
+
+ if vif_type is None:
+ raise exception.NovaException(
+ _("vif_type parameter must be present "
+ "for this vif_driver implementation"))
+
+ if vif_type == network_model.VIF_TYPE_BRIDGE:
+ self.unplug_bridge(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_OVS:
+ self.unplug_ovs(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_802_QBG:
+ self.unplug_802qbg(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_802_QBH:
+ self.unplug_802qbh(instance, vif)
+ else:
+ raise exception.NovaException(
+ _("Unexpected vif_type=%s") % vif_type)
+
+
+class LibvirtBridgeDriver(LibvirtGenericVIFDriver):
+ """Retained in Grizzly for compatibility with Quantum
+ drivers which do not yet report 'vif_type' port binding.
+ Will be deprecated in Havana, and removed in Ixxxx."""
+
+ def get_config(self, instance, network, mapping):
+ return self.get_config_bridge(instance, network, mapping)
+
+ def plug(self, instance, vif):
+ self.plug_bridge(instance, vif)
+
+ def unplug(self, instance, vif):
+ self.unplug_bridge(instance, vif)
+
-class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
- """VIF driver for Open vSwitch that uses integrated libvirt
- OVS virtual port XML (introduced in libvirt 0.9.11)."""
+class LibvirtOpenVswitchDriver(LibvirtGenericVIFDriver):
+ """Retained in Grizzly for compatibility with Quantum
+ drivers which do not yet report 'vif_type' port binding.
+ Will be deprecated in Havana, and removed in Ixxxx."""
def get_bridge_name(self, network):
return network.get('bridge') or CONF.libvirt_ovs_bridge
@@ -272,54 +475,71 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
def get_config(self, instance, network, mapping):
- """Pass data required to create OVS virtual port element."""
- conf = super(LibvirtOpenVswitchVirtualPortDriver,
- self).get_config(instance,
- network,
- mapping)
+ return self.get_config_ovs_ethernet(instance, network, mapping)
- designer.set_vif_host_backend_ovs_config(
- conf, self.get_bridge_name(network),
- self.get_ovs_interfaceid(mapping),
- self.get_vif_devname(mapping))
+ def plug(self, instance, vif):
+ self.plug_ovs_ethernet(instance, vif)
- return conf
+ def unplug(self, instance, vif):
+ self.unplug_ovs_ethernet(instance, vif)
+
+
+class LibvirtHybridOVSBridgeDriver(LibvirtGenericVIFDriver):
+ """Retained in Grizzly for compatibility with Quantum
+ drivers which do not yet report 'vif_type' port binding.
+ Will be deprecated in Havana, and removed in Ixxxx."""
+
+ def get_bridge_name(self, network):
+ return network.get('bridge') or CONF.libvirt_ovs_bridge
+
+ def get_ovs_interfaceid(self, mapping):
+ return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
+
+ def get_config(self, instance, network, mapping):
+ return self.get_config_ovs_hybrid(instance, network, mapping)
def plug(self, instance, vif):
- pass
+ return self.plug_ovs_hybrid(instance, vif)
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup."""
- pass
+ return self.unplug_ovs_hybrid(instance, vif)
-class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
- """VIF driver for Linux Bridge when running Quantum."""
+class LibvirtOpenVswitchVirtualPortDriver(LibvirtGenericVIFDriver):
+ """Retained in Grizzly for compatibility with Quantum
+ drivers which do not yet report 'vif_type' port binding.
+ Will be deprecated in Havana, and removed in Ixxxx."""
def get_bridge_name(self, network):
- def_bridge = ("brq" + network['id'])[:network_model.NIC_NAME_LEN]
- return network.get('bridge') or def_bridge
+ return network.get('bridge') or CONF.libvirt_ovs_bridge
+
+ def get_ovs_interfaceid(self, mapping):
+ return mapping.get('ovs_interfaceid') or mapping['vif_uuid']
def get_config(self, instance, network, mapping):
- linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
- self.get_bridge_name(network),
- None,
- filtering=False)
+ return self.get_config_ovs_bridge(instance, network, mapping)
- conf = super(QuantumLinuxBridgeVIFDriver,
- self).get_config(instance,
- network,
- mapping)
+ def plug(self, instance, vif):
+ return self.plug_ovs_bridge(instance, vif)
- designer.set_vif_host_backend_bridge_config(
- conf, self.get_bridge_name(network),
- self.get_vif_devname(mapping))
+ def unplug(self, instance, vif):
+ return self.unplug_ovs_bridge(instance, vif)
- return conf
+
+class QuantumLinuxBridgeVIFDriver(LibvirtGenericVIFDriver):
+ """Retained in Grizzly for compatibility with Quantum
+ drivers which do not yet report 'vif_type' port binding.
+ Will be deprecated in Havana, and removed in Ixxxx."""
+
+ def get_bridge_name(self, network):
+ def_bridge = ("brq" + network['id'])[:network_model.NIC_NAME_LEN]
+ return network.get('bridge') or def_bridge
+
+ def get_config(self, instance, network, mapping):
+ return self.get_config_bridge(instance, network, mapping)
def plug(self, instance, vif):
- pass
+ self.plug_bridge(instance, vif)
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup."""
- pass
+ self.unplug_bridge(instance, vif)
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index f9a948fb5..724a859b8 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -17,6 +17,7 @@
"""Volume drivers for libvirt."""
+import hashlib
import os
import time
@@ -24,6 +25,7 @@ from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
+from nova import paths
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as virtutils
@@ -40,67 +42,90 @@ volume_opts = [
cfg.StrOpt('rbd_secret_uuid',
default=None,
help='the libvirt uuid of the secret for the rbd_user'
- 'volumes')
+ ' volumes'),
+ cfg.StrOpt('nfs_mount_point_base',
+ default=paths.state_path_def('mnt'),
+ help='Dir where the nfs volume is mounted on the compute node'),
+ cfg.StrOpt('nfs_mount_options',
+ default=None,
+ help='Mount options passed to the nfs client. See the nfs '
+ 'man page for details'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
-class LibvirtVolumeDriver(object):
+class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
- def __init__(self, connection):
+ def __init__(self, connection, is_block_dev):
self.connection = connection
+ self.is_block_dev = is_block_dev
- def connect_volume(self, connection_info, mount_device):
+ def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
+
conf = vconfig.LibvirtConfigGuestDisk()
- conf.source_type = "block"
- conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=True)
+ conf.driver_name = virtutils.pick_disk_driver_name(self.is_block_dev)
+ conf.device_type = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
- conf.source_path = connection_info['data']['device_path']
- conf.target_dev = mount_device
- conf.target_bus = "virtio"
+ conf.target_dev = disk_info['dev']
+ conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
return conf
- def disconnect_volume(self, connection_info, mount_device):
+ def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
-class LibvirtFakeVolumeDriver(LibvirtVolumeDriver):
- """Driver to attach Network volumes to libvirt."""
+class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
+ """Class for volumes backed by local file."""
+ def __init__(self, connection):
+ super(LibvirtVolumeDriver,
+ self).__init__(connection, is_block_dev=True)
+
+ def connect_volume(self, connection_info, disk_info):
+ """Connect the volume to a local device."""
+ conf = super(LibvirtVolumeDriver,
+ self).connect_volume(connection_info,
+ disk_info)
+ conf.source_type = "block"
+ conf.source_path = connection_info['data']['device_path']
+ return conf
- def connect_volume(self, connection_info, mount_device):
- conf = vconfig.LibvirtConfigGuestDisk()
+
+class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
+ """Driver to attach fake volumes to libvirt."""
+ def __init__(self, connection):
+ super(LibvirtFakeVolumeDriver,
+ self).__init__(connection, is_block_dev=True)
+
+ def connect_volume(self, connection_info, disk_info):
+ """Connect the volume to a fake device."""
+ conf = super(LibvirtFakeVolumeDriver,
+ self).connect_volume(connection_info,
+ disk_info)
conf.source_type = "network"
- conf.driver_name = "qemu"
- conf.driver_format = "raw"
- conf.driver_cache = "none"
conf.source_protocol = "fake"
conf.source_host = "fake"
- conf.target_dev = mount_device
- conf.target_bus = "virtio"
- conf.serial = connection_info.get('serial')
return conf
-class LibvirtNetVolumeDriver(LibvirtVolumeDriver):
+class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
+ def __init__(self, connection):
+ super(LibvirtNetVolumeDriver,
+ self).__init__(connection, is_block_dev=False)
- def connect_volume(self, connection_info, mount_device):
- conf = vconfig.LibvirtConfigGuestDisk()
+ def connect_volume(self, connection_info, disk_info):
+ conf = super(LibvirtNetVolumeDriver,
+ self).connect_volume(connection_info,
+ disk_info)
conf.source_type = "network"
- conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=False)
- conf.driver_format = "raw"
- conf.driver_cache = "none"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_host = connection_info['data']['name']
- conf.target_dev = mount_device
- conf.target_bus = "virtio"
- conf.serial = connection_info.get('serial')
netdisk_properties = connection_info['data']
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
@@ -118,8 +143,11 @@ class LibvirtNetVolumeDriver(LibvirtVolumeDriver):
return conf
-class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
+class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
+ def __init__(self, connection):
+ super(LibvirtISCSIVolumeDriver,
+ self).__init__(connection, is_block_dev=False)
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
@@ -139,8 +167,12 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
@lockutils.synchronized('connect_volume', 'nova-')
- def connect_volume(self, connection_info, mount_device):
+ def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
+ conf = super(LibvirtISCSIVolumeDriver,
+ self).connect_volume(connection_info,
+ disk_info)
+
iscsi_properties = connection_info['data']
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
@@ -183,12 +215,13 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
+ disk_dev = disk_info['dev']
while not os.path.exists(host_device):
if tries >= CONF.num_iscsi_scan_tries:
raise exception.NovaException(_("iSCSI device not found at %s")
% (host_device))
- LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
+ LOG.warn(_("ISCSI volume not yet found at: %(disk_dev)s. "
"Will rescan & retry. Try number: %(tries)s") %
locals())
@@ -200,19 +233,19 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
time.sleep(tries ** 2)
if tries != 0:
- LOG.debug(_("Found iSCSI node %(mount_device)s "
+ LOG.debug(_("Found iSCSI node %(disk_dev)s "
"(after %(tries)s rescans)") %
locals())
- connection_info['data']['device_path'] = host_device
- sup = super(LibvirtISCSIVolumeDriver, self)
- return sup.connect_volume(connection_info, mount_device)
+ conf.source_type = "block"
+ conf.source_path = host_device
+ return conf
@lockutils.synchronized('connect_volume', 'nova-')
- def disconnect_volume(self, connection_info, mount_device):
+ def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
- sup = super(LibvirtISCSIVolumeDriver, self)
- sup.disconnect_volume(connection_info, mount_device)
+ super(LibvirtISCSIVolumeDriver,
+ self).disconnect_volume(connection_info, disk_dev)
iscsi_properties = connection_info['data']
# NOTE(vish): Only disconnect from the target if no luns from the
# target are in use.
@@ -228,3 +261,64 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
check_exit_code=[0, 21, 255])
self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
check_exit_code=[0, 21, 255])
+
+
+class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
+ """Class implements libvirt part of volume driver for NFS."""
+
+ def __init__(self, connection):
+ """Create back-end to nfs."""
+ super(LibvirtNFSVolumeDriver,
+ self).__init__(connection, is_block_dev=False)
+
+ def connect_volume(self, connection_info, disk_info):
+ """Connect the volume. Returns xml for libvirt."""
+ conf = super(LibvirtNFSVolumeDriver,
+ self).connect_volume(connection_info,
+ disk_info)
+ path = self._ensure_mounted(connection_info['data']['export'])
+ path = os.path.join(path, connection_info['data']['name'])
+ conf.source_type = 'file'
+ conf.source_path = path
+ return conf
+
+ def _ensure_mounted(self, nfs_export):
+ """
+ @type nfs_export: string
+ """
+ mount_path = os.path.join(CONF.nfs_mount_point_base,
+ self.get_hash_str(nfs_export))
+ self._mount_nfs(mount_path, nfs_export, ensure=True)
+ return mount_path
+
+ def _mount_nfs(self, mount_path, nfs_share, ensure=False):
+ """Mount nfs export to mount path."""
+ if not self._path_exists(mount_path):
+ utils.execute('mkdir', '-p', mount_path)
+
+ # Construct the NFS mount command.
+ nfs_cmd = ['mount', '-t', 'nfs']
+ if CONF.nfs_mount_options is not None:
+ nfs_cmd.extend(['-o', CONF.nfs_mount_options])
+ nfs_cmd.extend([nfs_share, mount_path])
+
+ try:
+ utils.execute(*nfs_cmd, run_as_root=True)
+ except exception.ProcessExecutionError as exc:
+ if ensure and 'already mounted' in exc.message:
+ LOG.warn(_("%s is already mounted"), nfs_share)
+ else:
+ raise
+
+ @staticmethod
+ def get_hash_str(base_str):
+ """returns string that represents hash of base_str (in hex format)."""
+ return hashlib.md5(base_str).hexdigest()
+
+ @staticmethod
+ def _path_exists(path):
+ """Check path."""
+ try:
+ return utils.execute('stat', path, run_as_root=True)
+ except exception.ProcessExecutionError:
+ return False
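
The new in-tree NFS driver gives each export a stable mount point named after the hash of the export string. A minimal sketch of that derivation, assuming the default nfs_mount_point_base of <state_path>/mnt:

    import hashlib
    import os

    MOUNT_POINT_BASE = '/var/lib/nova/mnt'  # assumed default location

    def nfs_mount_path(nfs_export):
        # Mirrors LibvirtNFSVolumeDriver._ensure_mounted(): the md5 hex
        # digest of the export names its mount directory (.encode keeps
        # the sketch runnable on Python 3 as well).
        digest = hashlib.md5(nfs_export.encode('utf-8')).hexdigest()
        return os.path.join(MOUNT_POINT_BASE, digest)

    print(nfs_mount_path('192.168.0.1:/export/nova'))
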
diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py
index b5083937d..668969ed2 100644..100755
--- a/nova/virt/libvirt/volume_nfs.py
+++ b/nova/virt/libvirt/volume_nfs.py
@@ -15,82 +15,26 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Volume driver for using NFS as volumes storage. Nova compute part."""
+"""Deprecated file, kept for back-compat only. To be removed in Hxxxx."""
-import ctypes
-import os
-
-from nova import exception
-from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-from nova import paths
-from nova import utils
from nova.virt.libvirt import volume
LOG = logging.getLogger(__name__)
-volume_opts = [
- cfg.StrOpt('nfs_mount_point_base',
- default=paths.state_path_def('mnt'),
- help='Base dir where nfs expected to be mounted on compute'),
-]
-CONF = cfg.CONF
-CONF.register_opts(volume_opts)
-
-class NfsVolumeDriver(volume.LibvirtVolumeDriver):
- """Class implements libvirt part of volume driver for NFS."""
+class NfsVolumeDriver(volume.LibvirtNFSVolumeDriver):
+ """Deprecated driver for NFS, renamed to LibvirtNFSVolumeDriver
+ and moved into the main volume.py module. Kept for backwards
+ compatibility in the Grizzly cycle, giving users the
+ opportunity to reconfigure before its removal in the
+ Hxxxx cycle."""
def __init__(self, *args, **kwargs):
- """Create back-end to nfs and check connection."""
- super(NfsVolumeDriver, self).__init__(*args, **kwargs)
-
- def connect_volume(self, connection_info, mount_device):
- """Connect the volume. Returns xml for libvirt."""
- path = self._ensure_mounted(connection_info['data']['export'])
- path = os.path.join(path, connection_info['data']['name'])
- connection_info['data']['device_path'] = path
- conf = super(NfsVolumeDriver, self).connect_volume(connection_info,
- mount_device)
- conf.source_type = 'file'
- return conf
-
- def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume."""
- pass
-
- def _ensure_mounted(self, nfs_export):
- """
- @type nfs_export: string
- """
- mount_path = os.path.join(CONF.nfs_mount_point_base,
- self.get_hash_str(nfs_export))
- self._mount_nfs(mount_path, nfs_export, ensure=True)
- return mount_path
-
- def _mount_nfs(self, mount_path, nfs_share, ensure=False):
- """Mount nfs export to mount path."""
- if not self._path_exists(mount_path):
- utils.execute('mkdir', '-p', mount_path)
-
- try:
- utils.execute('mount', '-t', 'nfs', nfs_share, mount_path,
- run_as_root=True)
- except exception.ProcessExecutionError as exc:
- if ensure and 'already mounted' in exc.message:
- LOG.warn(_("%s is already mounted"), nfs_share)
- else:
- raise
-
- @staticmethod
- def get_hash_str(base_str):
- """returns string that represents hash of base_str (in hex format)."""
- return str(ctypes.c_uint64(hash(base_str)).value)
-
- @staticmethod
- def _path_exists(path):
- """Check path."""
- try:
- return utils.execute('stat', path, run_as_root=True)
- except exception.ProcessExecutionError:
- return False
+ super(NfsVolumeDriver,
+ self).__init__(*args, **kwargs)
+ LOG.deprecated(
+ _("The nova.virt.libvirt.volume_nfs.NfsVolumeDriver "
+ "class is deprecated and will be removed in the "
+ "Hxxxx release. Please update nova.conf so that "
+ "the 'libvirt_volume_drivers' parameter refers to "
+ "nova.virt.libvirt.volume.LibvirtNFSVolumeDriver."))
diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py
index fb3a0210c..76caca1b9 100644
--- a/nova/virt/powervm/blockdev.py
+++ b/nova/virt/powervm/blockdev.py
@@ -18,16 +18,11 @@ import hashlib
import os
import re
-from eventlet import greenthread
-
-from nova import utils
-
from nova.image import glance
-
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-
+from nova import utils
from nova.virt import images
from nova.virt.powervm import command
from nova.virt.powervm import common
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index ccba3cf73..dd0f473a6 100644..100755
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -14,19 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
import time
-from nova.compute import task_states
-from nova.compute import vm_states
-
-from nova import context as nova_context
-
from nova.image import glance
-
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
-
from nova.virt import driver
from nova.virt.powervm import operator
@@ -45,7 +37,8 @@ powervm_opts = [
help='PowerVM manager user name'),
cfg.StrOpt('powervm_mgr_passwd',
default=None,
- help='PowerVM manager user password'),
+ help='PowerVM manager user password',
+ secret=True),
cfg.StrOpt('powervm_img_remote_path',
default=None,
help='PowerVM image remote path'),
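
Marking an option with secret=True makes the config layer mask the value whenever registered options are dumped to the log. A hedged standalone sketch (the installable oslo config package stands in for the vendored nova.openstack.common.cfg):

    import logging

    from oslo.config import cfg

    opts = [cfg.StrOpt('powervm_mgr_passwd', default=None,
                       help='PowerVM manager user password', secret=True)]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts)
    conf([])  # parse no arguments; defaults apply

    logging.basicConfig(level=logging.DEBUG)
    # Secret options are logged as '****' rather than their real value.
    conf.log_opt_values(logging.getLogger(__name__), logging.DEBUG)
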
@@ -95,17 +88,20 @@ class PowerVMDriver(driver.ComputeDriver):
def plug_vifs(self, instance, network_info):
pass
+ def macs_for_instance(self, instance):
+ return self._powervm.macs_for_instance(instance)
+
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create a new instance/VM/domain on powerVM."""
- self._powervm.spawn(context, instance, image_meta['id'])
+ self._powervm.spawn(context, instance, image_meta['id'], network_info)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance."""
self._powervm.destroy(instance['name'], destroy_disks)
- def reboot(self, instance, network_info, reboot_type,
+ def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot the specified instance.
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 87da30a14..c5c2b5f04 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -15,6 +15,7 @@
# under the License.
import decimal
+import random
import re
import time
@@ -170,7 +171,7 @@ class PowerVMOperator(object):
self._host_stats = data
- def spawn(self, context, instance, image_id):
+ def spawn(self, context, instance, image_id, network_info):
def _create_lpar_instance(instance):
host_stats = self.get_host_stats(refresh=True)
inst_name = instance['name']
@@ -201,9 +202,21 @@ class PowerVMOperator(object):
try:
# Network
+ # To ensure the MAC address on the guest matches the
+ # generated value, pull the first 10 characters off the
+ # MAC address for the mac_base_value parameter and then
+ # get the integer value of the final 2 characters as the
+ # slot_id parameter
+ mac = network_info[0]['address']
+ mac_base_value = (mac[:-2]).replace(':', '')
eth_id = self._operator.get_virtual_eth_adapter_id()
+ slot_id = int(mac[-2:], 16)
+ virtual_eth_adapters = ('%(slot_id)s/0/%(eth_id)s//0/0' %
+ locals())
# LPAR configuration data
+ # max_virtual_slots is hardcoded to 64 since we generate a MAC
+ # address that must be placed in slots 32 - 63
lpar_inst = LPAR.LPAR(
name=inst_name, lpar_env='aixlinux',
min_mem=mem_min, desired_mem=mem,
@@ -213,10 +226,14 @@ class PowerVMOperator(object):
min_proc_units=cpus_units_min,
desired_proc_units=cpus_units,
max_proc_units=cpus_max,
- virtual_eth_adapters='4/0/%s//0/0' % eth_id)
+ virtual_eth_mac_base_value=mac_base_value,
+ max_virtual_slots=64,
+ virtual_eth_adapters=virtual_eth_adapters)
LOG.debug(_("Creating LPAR instance '%s'") % instance['name'])
self._operator.create_lpar(lpar_inst)
+ #TODO(mjfork) capture and handle the error when the MAC prefix
+ # already exists on the system (1 in 2^28)
except nova_exception.ProcessExecutionError:
LOG.exception(_("LPAR instance '%s' creation failed") %
instance['name'])
@@ -345,6 +362,9 @@ class PowerVMOperator(object):
def power_on(self, instance_name):
self._operator.start_lpar(instance_name)
+ def macs_for_instance(self, instance):
+ return self._operator.macs_for_instance(instance)
+
class BaseOperator(object):
"""Base operator for IVM and HMC managed systems."""
@@ -573,6 +593,9 @@ class BaseOperator(object):
self._connection, command, check_exit_code=check_exit_code)
return stdout.read().splitlines()
+ def macs_for_instance(self, instance):
+ pass
+
class IVMOperator(BaseOperator):
"""Integrated Virtualization Manager (IVM) Operator.
@@ -583,3 +606,32 @@ class IVMOperator(BaseOperator):
def __init__(self, ivm_connection):
self.command = command.IVMCommand()
BaseOperator.__init__(self, ivm_connection)
+
+ def macs_for_instance(self, instance):
+ """Generates set of valid MAC addresses for an IVM instance."""
+ # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
+ # bridge mac addresses don't change, but it appears to
+ # conflict with libvirt, so we use the next highest octet
+ # that has the unicast and locally administered bits set
+ # properly: 0xfa.
+ # Discussion: https://bugs.launchpad.net/nova/+bug/921838
+ # NOTE(mjfork): For IVM-based PowerVM, we cannot directly set a MAC
+ # address on an LPAR, but rather need to construct one
+ # that can be used. Retain the 0xfa as noted above,
+ # but ensure the final two hex digits represent a
+ # value between 32 and 63 so we can assign it as the
+ # slot id on the system.
+ # FA:xx:xx:xx:x0:[32-63]
+
+ macs = set()
+ mac_base = [0xfa,
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff) & 0xf0,
+ random.randint(0x00, 0x00)]
+ for n in range(32, 64):
+ mac_base[5] = n
+ macs.add(':'.join(map(lambda x: "%02x" % x, mac_base)))
+
+ return macs
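
The generator above and the split in spawn() are two halves of one encoding: the first five octets become mac_base_value and the final octet becomes the LPAR slot id. A runnable sketch of the round trip:

    import random

    def ivm_macs():
        # As in IVMOperator.macs_for_instance(): 0xfa prefix, low nibble of
        # octet five zeroed, final octet constrained to slot ids 32..63.
        mac_base = [0xfa,
                    random.randint(0x00, 0xff),
                    random.randint(0x00, 0xff),
                    random.randint(0x00, 0xff),
                    random.randint(0x00, 0xff) & 0xf0]
        return set(':'.join('%02x' % x for x in mac_base + [n])
                   for n in range(32, 64))

    mac = sorted(ivm_macs())[0]
    mac_base_value = mac[:-2].replace(':', '')  # first five octets, colons dropped
    slot_id = int(mac[-2:], 16)                 # final octet recovered as slot id
    print(mac, mac_base_value, slot_id)
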
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
index 37d816f8c..1b9732b44 100644
--- a/nova/virt/vmwareapi/__init__.py
+++ b/nova/virt/vmwareapi/__init__.py
@@ -21,3 +21,4 @@
from nova.virt.vmwareapi import driver
VMwareESXDriver = driver.VMwareESXDriver
+VMwareVCDriver = driver.VMwareVCDriver
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 67822f2c9..19f984c7d 100644..100755
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -21,9 +21,10 @@ A connection to the VMware ESX platform.
**Related Flags**
-:vmwareapi_host_ip: IP address of VMware ESX server.
-:vmwareapi_host_username: Username for connection to VMware ESX Server.
-:vmwareapi_host_password: Password for connection to VMware ESX Server.
+:vmwareapi_host_ip: IP address or name of the VMware ESX/VC server.
+:vmwareapi_host_username: Username for connection to VMware ESX/VC Server.
+:vmwareapi_host_password: Password for connection to VMware ESX/VC Server.
+:vmwareapi_cluster_name: Name of a VMware Cluster ComputeResource.
:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks
(default: 5.0).
@@ -50,6 +51,7 @@ from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
@@ -59,29 +61,37 @@ LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
- help='URL for connection to VMware ESX host. Required if '
- 'compute_driver is vmwareapi.VMwareESXDriver.'),
+ help='URL for connection to VMware ESX/VC host. Required if '
+ 'compute_driver is vmwareapi.VMwareESXDriver or '
+ 'vmwareapi.VMwareVCDriver.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
- help='Username for connection to VMware ESX host. '
+ help='Username for connection to VMware ESX/VC host. '
'Used only if compute_driver is '
- 'vmwareapi.VMwareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.'),
cfg.StrOpt('vmwareapi_host_password',
default=None,
- help='Password for connection to VMware ESX host. '
+ help='Password for connection to VMware ESX/VC host. '
'Used only if compute_driver is '
- 'vmwareapi.VMwareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.',
+ secret=True),
+ cfg.StrOpt('vmwareapi_cluster_name',
+ default=None,
+ help='Name of a VMware Cluster ComputeResource. '
+ 'Used only if compute_driver is '
+ 'vmwareapi.VMwareVCDriver.'),
cfg.FloatOpt('vmwareapi_task_poll_interval',
default=5.0,
help='The interval used for polling of remote tasks. '
'Used only if compute_driver is '
- 'vmwareapi.VMwareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver or '
+ 'vmwareapi.VMwareVCDriver.'),
cfg.IntOpt('vmwareapi_api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc. '
'Used only if compute_driver is '
- 'vmwareapi.VMwareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.'),
cfg.IntOpt('vnc_port',
default=5900,
help='VNC starting port'),
@@ -90,7 +100,8 @@ vmwareapi_opts = [
help='Total number of VNC ports'),
cfg.StrOpt('vnc_password',
default=None,
- help='VNC password'),
+ help='VNC password',
+ secret=True),
cfg.BoolOpt('use_linked_clone',
default=True,
help='Whether to use linked clone'),
@@ -126,14 +137,17 @@ class VMwareESXDriver(driver.ComputeDriver):
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
"and vmwareapi_host_password to use"
- "compute_driver=vmwareapi.VMwareESXDriver"))
+ "compute_driver=vmwareapi.VMwareESXDriver or "
+ "vmwareapi.VMwareVCDriver"))
self._session = VMwareAPISession(self._host_ip,
host_username, host_password,
api_retry_count, scheme=scheme)
- self._volumeops = volumeops.VMwareVolumeOps(self._session)
+ self._cluster_name = CONF.vmwareapi_cluster_name
+ self._volumeops = volumeops.VMwareVolumeOps(self._session,
+ self._cluster_name)
self._vmops = vmops.VMwareVMOps(self._session, self.virtapi,
- self._volumeops)
+ self._volumeops, self._cluster_name)
self._host = host.Host(self._session)
self._host_state = None
@@ -166,7 +180,7 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, name, update_task_state)
- def reboot(self, instance, network_info, reboot_type,
+ def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, network_info)
@@ -209,40 +223,6 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Power on the specified instance."""
self._vmops.power_on(instance)
- def migrate_disk_and_power_off(self, context, instance, dest,
- instance_type, network_info,
- block_device_info=None):
- """
- Transfers the disk of a running instance in multiple phases, turning
- off the instance before the end.
- """
- return self._vmops.migrate_disk_and_power_off(context, instance,
- dest, instance_type)
-
- def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM."""
- self._vmops.confirm_migration(migration, instance, network_info)
-
- def finish_revert_migration(self, instance, network_info,
- block_device_info=None):
- """Finish reverting a resize, powering back on the instance."""
- self._vmops.finish_revert_migration(instance)
-
- def finish_migration(self, context, migration, instance, disk_info,
- network_info, image_meta, resize_instance=False,
- block_device_info=None):
- """Completes a resize, turning on the migrated instance."""
- self._vmops.finish_migration(context, migration, instance, disk_info,
- network_info, image_meta, resize_instance)
-
- def live_migration(self, context, instance_ref, dest,
- post_method, recover_method, block_migration=False,
- migrate_data=None):
- """Live migration of an instance to another host."""
- self._vmops.live_migration(context, instance_ref, dest,
- post_method, recover_method,
- block_migration)
-
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
@@ -361,6 +341,64 @@ class VMwareESXDriver(driver.ComputeDriver):
return self._vmops.list_interfaces(instance_name)
+class VMwareVCDriver(VMwareESXDriver):
+ """The ESX host connection object."""
+
+ def __init__(self, virtapi, read_only=False, scheme="https"):
+ super(VMwareVCDriver, self).__init__(virtapi)
+ if not self._cluster_name:
+ self._cluster = None
+ else:
+ self._cluster = vm_util.get_cluster_ref_from_name(
+ self._session, self._cluster_name)
+ if self._cluster is None:
+ raise exception.NotFound(_("VMware Cluster %s is not found")
+ % self._cluster_name)
+ self._vc_state = None
+
+ @property
+ def host_state(self):
+ if not self._vc_state:
+ self._vc_state = host.VCState(self._session,
+ self._host_ip,
+ self._cluster)
+ return self._vc_state
+
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type, network_info,
+ block_device_info=None):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ return self._vmops.migrate_disk_and_power_off(context, instance,
+ dest, instance_type)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM."""
+ self._vmops.confirm_migration(migration, instance, network_info)
+
+ def finish_revert_migration(self, instance, network_info,
+ block_device_info=None):
+ """Finish reverting a resize, powering back on the instance."""
+ self._vmops.finish_revert_migration(instance)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False,
+ block_device_info=None):
+ """Completes a resize, turning on the migrated instance."""
+ self._vmops.finish_migration(context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance)
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method, block_migration=False,
+ migrate_data=None):
+ """Live migration of an instance to another host."""
+ self._vmops.live_migration(context, instance_ref, dest,
+ post_method, recover_method,
+ block_migration)
+
+
class VMwareAPISession(object):
"""
Sets up a session with the ESX host and handles all
diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py
index 424dac707..9d11901d6 100644
--- a/nova/virt/vmwareapi/host.py
+++ b/nova/virt/vmwareapi/host.py
@@ -18,8 +18,6 @@
Management class for host-related functions (start, reboot, etc).
"""
-import json
-
from nova import exception
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import vim_util
@@ -140,3 +138,68 @@ class HostState(object):
self._stats = data
return data
+
+
+class VCState(object):
+ """Manages information about the VC host this compute
+ node is running on.
+ """
+ def __init__(self, session, host_name, cluster):
+ super(VCState, self).__init__()
+ self._session = session
+ self._host_name = host_name
+ self._cluster = cluster
+ self._stats = {}
+ self.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first.
+ """
+ if refresh:
+ self.update_status()
+ return self._stats
+
+ def update_status(self):
+ """Update the current state of the host.
+ """
+ host_mor = vm_util.get_host_ref(self._session, self._cluster)
+ if host_mor is None:
+ return
+
+ summary = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ host_mor,
+ "HostSystem",
+ "summary")
+
+ if summary is None:
+ return
+
+ try:
+ ds = vm_util.get_datastore_ref_and_name(self._session,
+ self._cluster)
+ except exception.DatastoreNotFound:
+ ds = (None, None, 0, 0)
+
+ data = {}
+ data["vcpus"] = summary.hardware.numCpuThreads
+ data["cpu_info"] =\
+ {"vendor": summary.hardware.vendor,
+ "model": summary.hardware.cpuModel,
+ "topology": {"cores": summary.hardware.numCpuCores,
+ "sockets": summary.hardware.numCpuPkgs,
+ "threads": summary.hardware.numCpuThreads}
+ }
+ data["disk_total"] = ds[2] / (1024 * 1024)
+ data["disk_available"] = ds[3] / (1024 * 1024)
+ data["disk_used"] = data["disk_total"] - data["disk_available"]
+ data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
+ data["host_memory_free"] = data["host_memory_total"] -\
+ summary.quickStats.overallMemoryUsage
+ data["hypervisor_type"] = summary.config.product.name
+ data["hypervisor_version"] = summary.config.product.version
+ data["hypervisor_hostname"] = self._host_name
+
+ self._stats = data
+ return data
diff --git a/nova/virt/vmwareapi/network_util.py b/nova/virt/vmwareapi/network_util.py
index f63d7f723..5a83b0763 100644
--- a/nova/virt/vmwareapi/network_util.py
+++ b/nova/virt/vmwareapi/network_util.py
@@ -29,14 +29,22 @@ from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
-def get_network_with_the_name(session, network_name="vmnet0"):
+def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
"""
Gets reference to the network whose name is passed as the
argument.
"""
- hostsystems = session._call_method(vim_util, "get_objects",
- "HostSystem", ["network"])
- vm_networks_ret = hostsystems[0].propSet[0].val
+ host = vm_util.get_host_ref(session, cluster)
+ if cluster is not None:
+ vm_networks_ret = session._call_method(vim_util,
+ "get_dynamic_property", cluster,
+ "ClusterComputeResource",
+ "network")
+ else:
+ vm_networks_ret = session._call_method(vim_util,
+ "get_dynamic_property", host,
+ "HostSystem", "network")
+
# Meaning there are no networks on the host. suds responds with a ""
# in the parent property field rather than a [] in the
# ManagedObjectReference property field of the parent
@@ -77,14 +85,13 @@ def get_network_with_the_name(session, network_name="vmnet0"):
return None
-def get_vswitch_for_vlan_interface(session, vlan_interface):
+def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
"""
Gets the vswitch associated with the physical network adapter
with the name supplied.
"""
    # Get the list of vSwitches on the Host System
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
vswitches_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.vswitch")
@@ -105,10 +112,9 @@ def get_vswitch_for_vlan_interface(session, vlan_interface):
pass
-def check_if_vlan_interface_exists(session, vlan_interface):
+def check_if_vlan_interface_exists(session, vlan_interface, cluster=None):
"""Checks if the vlan_inteface exists on the esx host."""
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
physical_nics_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.pnic")
@@ -122,10 +128,9 @@ def check_if_vlan_interface_exists(session, vlan_interface):
return False
-def get_vlanid_and_vswitch_for_portgroup(session, pg_name):
+def get_vlanid_and_vswitch_for_portgroup(session, pg_name, cluster=None):
"""Get the vlan id and vswicth associated with the port group."""
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
port_grps_on_host_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.portgroup")
@@ -141,7 +146,7 @@ def get_vlanid_and_vswitch_for_portgroup(session, pg_name):
return p_gp.spec.vlanId, p_grp_vswitch_name
-def create_port_group(session, pg_name, vswitch_name, vlan_id=0):
+def create_port_group(session, pg_name, vswitch_name, vlan_id=0, cluster=None):
"""
Creates a port group on the host system with the vlan tags
supplied. VLAN id 0 means no vlan id association.
@@ -152,8 +157,7 @@ def create_port_group(session, pg_name, vswitch_name, vlan_id=0):
vswitch_name,
pg_name,
vlan_id)
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
network_system_mor = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "configManager.networkSystem")
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index 5684e6aa6..137045508 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -36,7 +36,7 @@ vmwareapi_vif_opts = [
CONF.register_opts(vmwareapi_vif_opts)
-def ensure_vlan_bridge(self, session, network):
+def ensure_vlan_bridge(self, session, network, cluster=None):
"""Create a vlan and bridge unless they already exist."""
vlan_num = network['vlan']
bridge = network['bridge']
@@ -45,28 +45,31 @@ def ensure_vlan_bridge(self, session, network):
# Check if the vlan_interface physical network adapter exists on the
# host.
if not network_util.check_if_vlan_interface_exists(session,
- vlan_interface):
+ vlan_interface,
+ cluster):
raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
# Get the vSwitch associated with the Physical Adapter
vswitch_associated = network_util.get_vswitch_for_vlan_interface(
- session, vlan_interface)
+ session, vlan_interface, cluster)
if vswitch_associated is None:
raise exception.SwitchNotFoundForNetworkAdapter(
adapter=vlan_interface)
    # Check whether bridge already exists and retrieve the ref of the
# network whose name_label is "bridge"
- network_ref = network_util.get_network_with_the_name(session, bridge)
+ network_ref = network_util.get_network_with_the_name(session, bridge,
+ cluster)
if network_ref is None:
# Create a port group on the vSwitch associated with the
# vlan_interface corresponding physical network adapter on the ESX
# host.
network_util.create_port_group(session, bridge,
- vswitch_associated, vlan_num)
+ vswitch_associated, vlan_num,
+ cluster)
else:
# Get the vlan id and vswitch corresponding to the port group
_get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup
- pg_vlanid, pg_vswitch = _get_pg_info(session, bridge)
+ pg_vlanid, pg_vswitch = _get_pg_info(session, bridge, cluster)
# Check if the vswitch associated is proper
if pg_vswitch != vswitch_associated:
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index af481b566..c1015cb13 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -480,11 +480,61 @@ def get_vm_ref_from_name(session, vm_name):
return None
-def get_datastore_ref_and_name(session):
+def get_cluster_ref_from_name(session, cluster_name):
+ """Get reference to the cluster with the name specified."""
+ cls = session._call_method(vim_util, "get_objects",
+ "ClusterComputeResource", ["name"])
+ for cluster in cls:
+ if cluster.propSet[0].val == cluster_name:
+ return cluster.obj
+ return None
+
+
+def get_host_ref(session, cluster=None):
+ """Get reference to a host within the cluster specified."""
+ if cluster is None:
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ else:
+ host_ret = session._call_method(vim_util, "get_dynamic_property",
+ cluster, "ClusterComputeResource",
+ "host")
+ if host_ret is None:
+ return
+ if not host_ret.ManagedObjectReference:
+ return
+ host_mor = host_ret.ManagedObjectReference[0]
+
+ return host_mor
+
+
+def get_datastore_ref_and_name(session, cluster=None, host=None):
"""Get the datastore list and choose the first local storage."""
- data_stores = session._call_method(vim_util, "get_objects",
- "Datastore", ["summary.type", "summary.name",
- "summary.capacity", "summary.freeSpace"])
+ if cluster is None and host is None:
+ data_stores = session._call_method(vim_util, "get_objects",
+ "Datastore", ["summary.type", "summary.name",
+ "summary.capacity", "summary.freeSpace"])
+ else:
+ if cluster is not None:
+ datastore_ret = session._call_method(
+ vim_util,
+ "get_dynamic_property", cluster,
+ "ClusterComputeResource", "datastore")
+ else:
+ datastore_ret = session._call_method(
+ vim_util,
+ "get_dynamic_property", host,
+ "HostSystem", "datastore")
+
+ if datastore_ret is None:
+ raise exception.DatastoreNotFound()
+ data_store_mors = datastore_ret.ManagedObjectReference
+ data_stores = session._call_method(vim_util,
+ "get_properties_for_a_collection_of_objects",
+ "Datastore", data_store_mors,
+ ["summary.type", "summary.name",
+ "summary.capacity", "summary.freeSpace"])
+
for elem in data_stores:
ds_name = None
ds_type = None
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 5ea9f105c..106de0cb0 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -35,9 +35,7 @@ from nova import context as nova_context
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import excutils
-from nova.openstack.common import importutils
from nova.openstack.common import log as logging
-from nova.openstack.common import timeutils
from nova.virt import driver
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif as vmwarevif
@@ -75,12 +73,17 @@ RESIZE_TOTAL_STEPS = 4
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
- def __init__(self, session, virtapi, volumeops):
+ def __init__(self, session, virtapi, volumeops, cluster_name=None):
"""Initializer."""
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops
+ if not cluster_name:
+ self._cluster = None
+ else:
+ self._cluster = vm_util.get_cluster_ref_from_name(
+ self._session, cluster_name)
self._instance_path_base = VMWARE_PREFIX + CONF.base_dir_name
self._default_root_device = 'vda'
self._rescue_suffix = '-rescue'
@@ -135,7 +138,7 @@ class VMwareVMOps(object):
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
- ds = vm_util.get_datastore_ref_and_name(self._session)
+ ds = vm_util.get_datastore_ref_and_name(self._session, self._cluster)
data_store_ref = ds[0]
data_store_name = ds[1]
@@ -159,11 +162,12 @@ class VMwareVMOps(object):
(vmdk_file_size_in_kb, os_type, adapter_type,
disk_type) = _get_image_properties()
- vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs()
+ vm_folder_ref = self._get_vmfolder_ref()
+ res_pool_ref = self._get_res_pool_ref()
def _check_if_network_bridge_exists(network_name):
network_ref = network_util.get_network_with_the_name(
- self._session, network_name)
+ self._session, network_name, self._cluster)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=network_name)
return network_ref
@@ -178,7 +182,8 @@ class VMwareVMOps(object):
CONF.vmware.integration_bridge
if mapping.get('should_create_vlan'):
network_ref = vmwarevif.ensure_vlan_bridge(
- self._session, network)
+ self._session, network,
+ self._cluster)
else:
network_ref = _check_if_network_bridge_exists(network_name)
vif_infos.append({'network_name': network_name,
@@ -341,8 +346,8 @@ class VMwareVMOps(object):
"data_store_name": data_store_name},
instance=instance)
- ebs_root = self._volume_in_mapping(self._default_root_device,
- block_device_info)
+ ebs_root = block_device.volume_in_mapping(
+ self._default_root_device, block_device_info)
if not ebs_root:
linked_clone = CONF.use_linked_clone
@@ -488,7 +493,7 @@ class VMwareVMOps(object):
vm_ref,
"VirtualMachine",
"datastore")
- if not ds_ref_ret:
+ if ds_ref_ret is None:
raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
ds_browser = vim_util.get_dynamic_property(
@@ -651,8 +656,7 @@ class VMwareVMOps(object):
LOG.debug(_("Destroyed the VM"), instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:delete, got this exception"
- " while destroying the VM: %s") % str(excep),
- instance=instance)
+ " while destroying the VM: %s") % str(excep))
if network_info:
self.unplug_vifs(instance, network_info)
@@ -704,8 +708,7 @@ class VMwareVMOps(object):
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
- " while un-registering the VM: %s") % str(excep),
- instance=instance)
+ " while un-registering the VM: %s") % str(excep))
if network_info:
self.unplug_vifs(instance, network_info)
@@ -737,8 +740,7 @@ class VMwareVMOps(object):
LOG.warn(_("In vmwareapi:vmops:destroy, "
"got this exception while deleting"
" the VM contents from the disk: %s")
- % str(excep),
- instance=instance)
+ % str(excep))
except Exception, exc:
LOG.exception(exc, instance=instance)
@@ -938,11 +940,12 @@ class VMwareVMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
# Get the clone vm spec
- ds_ref = vm_util.get_datastore_ref_and_name(self._session)[0]
+ ds_ref = vm_util.get_datastore_ref_and_name(
+ self._session, None, dest)[0]
client_factory = self._session._get_vim().client.factory
rel_spec = vm_util.relocate_vm_spec(client_factory, ds_ref, host_ref)
clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec)
- vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs()
+ vm_folder_ref = self._get_vmfolder_ref()
# 3. Clone VM on ESX host
LOG.debug(_("Cloning VM to host %s") % dest, instance=instance)
@@ -1205,18 +1208,27 @@ class VMwareVMOps(object):
return host.obj
return None
- def _get_vmfolder_and_res_pool_refs(self):
+ def _get_vmfolder_ref(self):
"""Get the Vm folder ref from the datacenter."""
dc_objs = self._session._call_method(vim_util, "get_objects",
- "Datacenter", ["vmFolder"])
+ "Datacenter", ["vmFolder"])
# There is only one default datacenter in a standalone ESX host
vm_folder_ref = dc_objs[0].propSet[0].val
+ return vm_folder_ref
+ def _get_res_pool_ref(self):
# Get the resource pool. Taking the first resource pool coming our
# way. Assuming that is the default resource pool.
- res_pool_ref = self._session._call_method(vim_util, "get_objects",
- "ResourcePool")[0].obj
- return vm_folder_ref, res_pool_ref
+ if self._cluster is None:
+ res_pool_ref = self._session._call_method(vim_util, "get_objects",
+ "ResourcePool")[0].obj
+ else:
+ res_pool_ref = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ self._cluster,
+ "ClusterComputeResource",
+ "resourcePool")
+ return res_pool_ref
def _path_exists(self, ds_browser, ds_path):
"""Check if the path exists on the datastore."""
@@ -1271,9 +1283,11 @@ class VMwareVMOps(object):
DataStore.
"""
LOG.debug(_("Creating directory with path %s") % ds_path)
+ dc_ref = self._get_datacenter_ref_and_name()[0]
self._session._call_method(self._session._get_vim(), "MakeDirectory",
self._session._get_vim().get_service_content().fileManager,
- name=ds_path, createParentDirectories=False)
+ name=ds_path, datacenter=dc_ref,
+ createParentDirectories=False)
LOG.debug(_("Created directory with path %s") % ds_path)
def _check_if_folder_file_exists(self, ds_ref, ds_name,
@@ -1333,21 +1347,3 @@ class VMwareVMOps(object):
interfaces.append(device.key)
return interfaces
-
- @staticmethod
- def _volume_in_mapping(mount_device, block_device_info):
- block_device_list = [block_device.strip_dev(vol['mount_device'])
- for vol in
- driver.block_device_info_get_mapping(
- block_device_info)]
- swap = driver.block_device_info_get_swap(block_device_info)
- if driver.swap_is_usable(swap):
- block_device_list.append(
- block_device.strip_dev(swap['device_name']))
- block_device_list += [block_device.strip_dev(ephemeral['device_name'])
- for ephemeral in
- driver.block_device_info_get_ephemerals(
- block_device_info)]
-
- LOG.debug(_("block_device_list %s"), block_device_list)
- return block_device.strip_dev(mount_device) in block_device_list
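
The helper removed above (and its twin deleted from vm_utils.py later in this patch) collapse into the shared nova.block_device.volume_in_mapping check. A reduction of the membership test it performs, with an illustrative strip_dev:

    def strip_dev(device_name):
        # Illustrative equivalent of block_device.strip_dev(): drop '/dev/'.
        return device_name[5:] if device_name.startswith('/dev/') else device_name

    def volume_in_mapping(mount_device, mapped_devices):
        # Volumes, usable swap, and ephemerals are all normalised to their
        # bare device names before the membership test.
        return strip_dev(mount_device) in [strip_dev(d) for d in mapped_devices]

    print(volume_in_mapping('/dev/vda', ['vda', '/dev/vdb']))  # True
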
diff --git a/nova/virt/vmwareapi/volume_util.py b/nova/virt/vmwareapi/volume_util.py
index 9d556cd26..ae9a30e37 100644
--- a/nova/virt/vmwareapi/volume_util.py
+++ b/nova/virt/vmwareapi/volume_util.py
@@ -22,9 +22,9 @@ and storage repositories
import re
import string
-from nova import exception
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
@@ -36,33 +36,33 @@ class StorageError(Exception):
super(StorageError, self).__init__(message)
-def get_host_iqn(session):
+def get_host_iqn(session, cluster=None):
"""
Return the host iSCSI IQN.
"""
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
hbas_ret = session._call_method(vim_util, "get_dynamic_property",
host_mor, "HostSystem",
"config.storageDevice.hostBusAdapter")
# Meaning there are no host bus adapters on the host
- if not hbas_ret:
+ if hbas_ret is None:
return
host_hbas = hbas_ret.HostHostBusAdapter
+ if not host_hbas:
+ return
for hba in host_hbas:
if hba.__class__.__name__ == 'HostInternetScsiHba':
return hba.iScsiName
-def find_st(session, data):
+def find_st(session, data, cluster=None):
"""
Return the iSCSI Target given a volume info.
"""
target_portal = data['target_portal']
target_iqn = data['target_iqn']
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
lst_properties = ["config.storageDevice.hostBusAdapter",
"config.storageDevice.scsiTopology",
@@ -134,13 +134,14 @@ def find_st(session, data):
return result
-def rescan_iscsi_hba(session):
+def rescan_iscsi_hba(session, cluster=None):
"""
Rescan the iSCSI HBA to discover iSCSI targets.
"""
- # There is only one default storage system in a standalone ESX host
- storage_system_mor = session._call_method(vim_util, "get_objects",
- "HostSystem", ["configManager.storageSystem"])[0].propSet[0].val
+ host_mor = vm_util.get_host_ref(session, cluster)
+ storage_system_mor = session._call_method(vim_util, "get_dynamic_property",
+ host_mor, "HostSystem",
+ "configManager.storageSystem")
hbas_ret = session._call_method(vim_util,
"get_dynamic_property",
storage_system_mor,
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
index 922d2135b..855106e4a 100644
--- a/nova/virt/vmwareapi/volumeops.py
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -35,8 +35,13 @@ class VMwareVolumeOps(object):
Management class for Volume-related tasks
"""
- def __init__(self, session):
+ def __init__(self, session, cluster_name=None):
self._session = session
+ if not cluster_name:
+ self._cluster = None
+ else:
+ self._cluster = vm_util.get_cluster_ref_from_name(
+ self._session, cluster_name)
def attach_disk_to_vm(self, vm_ref, instance_name,
adapter_type, disk_type, vmdk_path=None,
@@ -88,14 +93,16 @@ class VMwareVolumeOps(object):
target_iqn = data['target_iqn']
LOG.debug(_("Discovering iSCSI target %(target_iqn)s from "
"%(target_portal)s.") % locals())
- device_name, uuid = volume_util.find_st(self._session, data)
+ device_name, uuid = volume_util.find_st(self._session, data,
+ self._cluster)
if device_name:
LOG.debug(_("Storage target found. No need to discover"))
return (device_name, uuid)
# Rescan iSCSI HBA
- volume_util.rescan_iscsi_hba(self._session)
+ volume_util.rescan_iscsi_hba(self._session, self._cluster)
# Find iSCSI Target again
- device_name, uuid = volume_util.find_st(self._session, data)
+ device_name, uuid = volume_util.find_st(self._session, data,
+ self._cluster)
if device_name:
LOG.debug(_("Discovered iSCSI target %(target_iqn)s from "
"%(target_portal)s.") % locals())
@@ -106,7 +113,7 @@ class VMwareVolumeOps(object):
def get_volume_connector(self, instance):
"""Return volume connector information."""
- iqn = volume_util.get_host_iqn(self._session)
+ iqn = volume_util.get_host_iqn(self._session, self._cluster)
return {
'ip': CONF.vmwareapi_host_ip,
'initiator': iqn,
@@ -167,7 +174,8 @@ class VMwareVolumeOps(object):
data = connection_info['data']
# Discover iSCSI Target
- device_name, uuid = volume_util.find_st(self._session, data)
+ device_name, uuid = volume_util.find_st(self._session, data,
+ self._cluster)
if device_name is None:
raise volume_util.StorageError(_("Unable to find iSCSI Target"))
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index ef08edbc1..8220fb67b 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -123,8 +123,9 @@ def _get_agent_version(session, instance, vm_ref):
class XenAPIBasedAgent(object):
- def __init__(self, session, instance, vm_ref):
+ def __init__(self, session, virtapi, instance, vm_ref):
self.session = session
+ self.virtapi = virtapi
self.instance = instance
self.vm_ref = vm_ref
@@ -188,7 +189,7 @@ class XenAPIBasedAgent(object):
if resp['returncode'] != 'D0':
msg = _('Failed to exchange keys: %(resp)r') % locals()
LOG.error(msg, instance=self.instance)
- raise Exception(msg)
+ raise NotImplementedError(msg)
# Some old versions of the Windows agent have a trailing \\r\\n
# (ie CRLF escaped) for some reason. Strip that off.
@@ -208,13 +209,17 @@ class XenAPIBasedAgent(object):
if resp['returncode'] != '0':
msg = _('Failed to update password: %(resp)r') % locals()
LOG.error(msg, instance=self.instance)
- raise Exception(msg)
+ raise NotImplementedError(msg)
sshkey = self.instance.get('key_data')
if sshkey:
+ ctxt = context.get_admin_context()
enc = crypto.ssh_encrypt_text(sshkey, new_pass)
- password.set_password(context.get_admin_context(),
- self.instance['uuid'], base64.b64encode(enc))
+ sys_meta = utils.metadata_to_dict(self.instance['system_metadata'])
+ sys_meta.update(password.convert_password(ctxt,
+ base64.b64encode(enc)))
+ self.virtapi.instance_update(ctxt, self.instance['uuid'],
+ {'system_metadata': sys_meta})
return resp['message']
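
Rather than writing through the metadata password API directly, the agent now folds the encrypted password into the instance's system_metadata and persists it via virtapi. A hedged sketch of the merge (the real convert_password() in nova.api.metadata.password chunks the value across several keys; one illustrative key is used here):

    import base64

    def merge_password(sys_meta, enc_password):
        # Fold the base64 ciphertext into a copy of system_metadata; the
        # key name is illustrative, not the real chunked layout.
        merged = dict(sys_meta)
        merged['password_0'] = base64.b64encode(enc_password)
        return merged

    print(merge_password({'image_os_type': 'linux'}, b'ciphertext'))
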
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index a894e95b9..c1a578f3b 100644..100755
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -71,7 +71,8 @@ xenapi_opts = [
cfg.StrOpt('xenapi_connection_password',
default=None,
help='Password for connection to XenServer/Xen Cloud Platform. '
- 'Used only if compute_driver=xenapi.XenAPIDriver'),
+ 'Used only if compute_driver=xenapi.XenAPIDriver',
+ secret=True),
cfg.IntOpt('xenapi_connection_concurrent',
default=5,
help='Maximum number of concurrent XenAPI connections. '
@@ -192,7 +193,7 @@ class XenAPIDriver(driver.ComputeDriver):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
- def reboot(self, instance, network_info, reboot_type,
+ def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot VM instance."""
self._vmops.reboot(instance, reboot_type)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 666e46754..bdadfbc38 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -89,7 +89,7 @@ def reset():
def reset_table(table):
- if not table in _CLASSES:
+ if table not in _CLASSES:
return
_db_content[table] = {}
@@ -417,7 +417,7 @@ class SessionBase(object):
def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
db_ref = _db_content['VBD'][vbd_ref]
- if not 'other_config' in db_ref:
+ if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
@@ -426,7 +426,7 @@ class SessionBase(object):
def VBD_get_other_config(self, _1, vbd_ref):
db_ref = _db_content['VBD'][vbd_ref]
- if not 'other_config' in db_ref:
+ if 'other_config' not in db_ref:
return {}
return db_ref['other_config']
@@ -497,14 +497,14 @@ class SessionBase(object):
def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
db_ref = _db_content['VM'][vm_ref]
- if not 'xenstore_data' in db_ref:
+ if 'xenstore_data' not in db_ref:
return
if key in db_ref['xenstore_data']:
del db_ref['xenstore_data'][key]
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
- if not 'xenstore_data' in db_ref:
+ if 'xenstore_data' not in db_ref:
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
@@ -513,14 +513,14 @@ class SessionBase(object):
def VDI_remove_from_other_config(self, _1, vdi_ref, key):
db_ref = _db_content['VDI'][vdi_ref]
- if not 'other_config' in db_ref:
+ if 'other_config' not in db_ref:
return
if key in db_ref['other_config']:
del db_ref['other_config'][key]
def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
db_ref = _db_content['VDI'][vdi_ref]
- if not 'other_config' in db_ref:
+ if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config',
diff --git a/nova/virt/xenapi/imageupload/__init__.py b/nova/virt/xenapi/imageupload/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/virt/xenapi/imageupload/__init__.py
diff --git a/nova/virt/xenapi/imageupload/glance.py b/nova/virt/xenapi/imageupload/glance.py
new file mode 100644
index 000000000..adc06f65b
--- /dev/null
+++ b/nova/virt/xenapi/imageupload/glance.py
@@ -0,0 +1,54 @@
+# Copyright 2013 OpenStack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.image import glance
+from nova.openstack.common import cfg
+import nova.openstack.common.log as logging
+from nova.virt.xenapi import vm_utils
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+class GlanceStore(object):
+
+ def upload_image(self, context, session, instance, vdi_uuids, image_id):
+ """Requests that the Glance plugin bundle the specified VDIs and
+ push them into Glance using the specified human-friendly name.
+ """
+ # NOTE(sirp): Currently we only support uploading images as VHD, there
+ # is no RAW equivalent (yet)
+ LOG.debug(_("Asking xapi to upload to glance %(vdi_uuids)s as"
+ " ID %(image_id)s"), locals(), instance=instance)
+
+ glance_api_servers = glance.get_api_servers()
+ glance_host, glance_port, glance_use_ssl = glance_api_servers.next()
+
+ properties = {
+ 'auto_disk_config': instance['auto_disk_config'],
+ 'os_type': instance['os_type'] or CONF.default_os_type,
+ }
+
+ params = {'vdi_uuids': vdi_uuids,
+ 'image_id': image_id,
+ 'glance_host': glance_host,
+ 'glance_port': glance_port,
+ 'glance_use_ssl': glance_use_ssl,
+ 'sr_path': vm_utils.get_sr_path(session),
+ 'auth_token': getattr(context, 'auth_token', None),
+ 'properties': properties}
+
+ session.call_plugin_serialized('glance', 'upload_vhd', **params)
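
The store's only external contact point is the serialized plugin call at the end. A runnable sketch of that contract with a fake session (every value below is illustrative):

    class FakePluginSession(object):
        def call_plugin_serialized(self, plugin, fn, **params):
            # The real XenAPI session serializes params and invokes the
            # dom0 plugin; this just shows what crosses the boundary.
            print(plugin, fn, sorted(params))

    FakePluginSession().call_plugin_serialized(
        'glance', 'upload_vhd',
        vdi_uuids=['leaf-vdi-uuid'], image_id='glance-image-id',
        glance_host='glance.example.org', glance_port=9292,
        glance_use_ssl=False, sr_path='/var/run/sr-mount/sr-uuid',
        auth_token=None, properties={'os_type': 'linux'})
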
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 582a9320a..ec6450d9f 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -47,7 +47,6 @@ from nova import utils
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
-from nova.virt import driver
from nova.virt.xenapi import agent
from nova.virt.xenapi import volume_utils
@@ -461,66 +460,66 @@ def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
return vdi_ref
-def get_vdis_for_boot_from_vol(session, dev_params):
- vdis = {}
- sr_uuid, label, sr_params = volume_utils.parse_sr_info(dev_params)
+def get_vdi_uuid_for_volume(session, connection_data):
+ sr_uuid, label, sr_params = volume_utils.parse_sr_info(connection_data)
sr_ref = volume_utils.find_sr_by_uuid(session, sr_uuid)
- # Try introducing SR if it is not present
+
if not sr_ref:
sr_ref = volume_utils.introduce_sr(session, sr_uuid, label, sr_params)
if sr_ref is None:
raise exception.NovaException(_('SR not present and could not be '
'introduced'))
- else:
- if 'vdi_uuid' in dev_params:
- session.call_xenapi("SR.scan", sr_ref)
- vdis = {'root': dict(uuid=dev_params['vdi_uuid'],
- file=None, osvol=True)}
- else:
- try:
- vdi_ref = volume_utils.introduce_vdi(session, sr_ref)
- vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
- vdis = {'root': dict(uuid=vdi_rec['uuid'],
- file=None, osvol=True)}
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- volume_utils.forget_sr(session, sr_uuid)
- return vdis
+ vdi_uuid = None
+
+ if 'vdi_uuid' in connection_data:
+ session.call_xenapi("SR.scan", sr_ref)
+ vdi_uuid = connection_data['vdi_uuid']
+ else:
+ try:
+ vdi_ref = volume_utils.introduce_vdi(session, sr_ref)
+ vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+ vdi_uuid = vdi_rec['uuid']
+ except volume_utils.StorageError, exc:
+ LOG.exception(exc)
+ volume_utils.forget_sr(session, sr_uuid)
-def _volume_in_mapping(mount_device, block_device_info):
- block_device_list = [block_device.strip_prefix(vol['mount_device'])
- for vol in
- driver.block_device_info_get_mapping(
- block_device_info)]
- swap = driver.block_device_info_get_swap(block_device_info)
- if driver.swap_is_usable(swap):
- swap_dev = swap['device_name']
- block_device_list.append(block_device.strip_prefix(swap_dev))
- block_device_list += [block_device.strip_prefix(ephemeral['device_name'])
- for ephemeral in
- driver.block_device_info_get_ephemerals(
- block_device_info)]
- LOG.debug(_("block_device_list %s"), block_device_list)
- return block_device.strip_prefix(mount_device) in block_device_list
+ return vdi_uuid
def get_vdis_for_instance(context, session, instance, name_label, image,
image_type, block_device_info=None):
+ vdis = {}
+
if block_device_info:
LOG.debug(_("block device info: %s"), block_device_info)
- rootdev = block_device_info['root_device_name']
- if _volume_in_mapping(rootdev, block_device_info):
- # call function to return the vdi in connection info of block
- # device.
- # make it a point to return from here.
- bdm_root_dev = block_device_info['block_device_mapping'][0]
- dev_params = bdm_root_dev['connection_info']['data']
- LOG.debug(dev_params)
- return get_vdis_for_boot_from_vol(session, dev_params)
- return _create_image(context, session, instance, name_label, image,
- image_type)
+ root_device_name = block_device_info['root_device_name']
+
+ for bdm in block_device_info['block_device_mapping']:
+ if (block_device.strip_prefix(bdm['mount_device']) ==
+ block_device.strip_prefix(root_device_name)):
+                # If this is the root device, record that fact so we don't
+                # download a root image via Glance
+ type_ = 'root'
+ else:
+ # Otherwise, use mount_device as `type_` so that we have easy
+ # access to it in _attach_disks to create the VBD
+ type_ = bdm['mount_device']
+
+ connection_data = bdm['connection_info']['data']
+ vdi_uuid = get_vdi_uuid_for_volume(session, connection_data)
+ if vdi_uuid:
+ vdis[type_] = dict(uuid=vdi_uuid, file=None, osvol=True)
+
+ # If we didn't get a root VDI from volumes, then use the Glance image as
+ # the root device
+ if 'root' not in vdis:
+ create_image_vdis = _create_image(
+ context, session, instance, name_label, image, image_type)
+ vdis.update(create_image_vdis)
+
+ return vdis
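For reference, the shape of the mapping this now returns when an instance
boots from a volume and has one additional data volume attached (UUIDs
illustrative):

    vdis = {
        'root':      {'uuid': 'aaaa-1111-...', 'file': None, 'osvol': True},
        '/dev/xvdb': {'uuid': 'bbbb-2222-...', 'file': None, 'osvol': True},
    }

When no volume maps to the root device, the 'root' entry instead comes from
`_create_image`, i.e. a Glance-backed image VDI without the `osvol` flag.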
@contextlib.contextmanager
@@ -715,35 +714,6 @@ def _find_cached_image(session, image_id, sr_ref):
return cached_images.get(image_id)
-def upload_image(context, session, instance, vdi_uuids, image_id):
- """Requests that the Glance plugin bundle the specified VDIs and
- push them into Glance using the specified human-friendly name.
- """
- # NOTE(sirp): Currently we only support uploading images as VHD, there
- # is no RAW equivalent (yet)
- LOG.debug(_("Asking xapi to upload %(vdi_uuids)s as"
- " ID %(image_id)s"), locals(), instance=instance)
-
- glance_api_servers = glance.get_api_servers()
- glance_host, glance_port, glance_use_ssl = glance_api_servers.next()
-
- properties = {
- 'auto_disk_config': instance['auto_disk_config'],
- 'os_type': instance['os_type'] or CONF.default_os_type,
- }
-
- params = {'vdi_uuids': vdi_uuids,
- 'image_id': image_id,
- 'glance_host': glance_host,
- 'glance_port': glance_port,
- 'glance_use_ssl': glance_use_ssl,
- 'sr_path': get_sr_path(session),
- 'auth_token': getattr(context, 'auth_token', None),
- 'properties': properties}
-
- session.call_plugin_serialized('glance', 'upload_vhd', **params)
-
-
def resize_disk(session, instance, vdi_ref, instance_type):
# Copy VDI over to something we can resize
# NOTE(jerdfelt): Would be nice to just set vdi_ref to read/write
@@ -1589,7 +1559,7 @@ def _find_iso_sr(session):
if not sr_rec['content_type'] == 'iso':
LOG.debug(_("ISO: not iso content"))
continue
- if not 'i18n-key' in sr_rec['other_config']:
+ if 'i18n-key' not in sr_rec['other_config']:
LOG.debug(_("ISO: iso content_type, no 'i18n-key' key"))
continue
if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso':
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 4a8372cda..5fca96817 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -26,6 +26,7 @@ import time
from eventlet import greenthread
import netaddr
+from nova import block_device
from nova.compute import api as compute
from nova.compute import power_state
from nova.compute import task_states
@@ -59,7 +60,10 @@ xenapi_vmops_opts = [
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
- help='The XenAPI VIF driver using XenServer Network APIs.')
+ help='The XenAPI VIF driver using XenServer Network APIs.'),
+ cfg.StrOpt('xenapi_image_upload_handler',
+ default='nova.virt.xenapi.imageupload.glance.GlanceStore',
+ help='Object Store Driver used to handle image uploads.'),
]
CONF = cfg.CONF
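Since the handler is resolved by class path, any object exposing the same
`upload_image` signature can be substituted. A hypothetical alternative store,
purely to illustrate the duck-typed interface (`SwiftStore` is not part of
this change):

    class SwiftStore(object):
        """Hypothetical handler: point xenapi_image_upload_handler at this
        class's import path to route uploads somewhere other than Glance."""

        def upload_image(self, context, session, instance, vdi_uuids,
                         image_id):
            raise NotImplementedError()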
@@ -161,13 +165,19 @@ class VMOps(object):
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
+ msg = _("Importing image upload handler: %s")
+ LOG.debug(msg % CONF.xenapi_image_upload_handler)
+ self.image_upload_handler = importutils.import_object(
+ CONF.xenapi_image_upload_handler)
+
@property
def agent_enabled(self):
return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
- return xapi_agent.XenAPIBasedAgent(self._session, instance, vm_ref)
+ return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
+ instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def list_instances(self):
@@ -183,7 +193,7 @@ class VMOps(object):
def confirm_migration(self, migration, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
- return self._destroy(instance, vm_ref, network_info)
+ return self._destroy(instance, vm_ref, network_info=network_info)
def _attach_mapped_block_devices(self, instance, block_device_info):
# We are attaching these volumes before start (no hotplugging)
@@ -294,7 +304,7 @@ class VMOps(object):
def create_disks_step(undo_mgr, disk_image_type, image_meta):
vdis = self._create_disks(context, instance, name_label,
disk_image_type, image_meta,
- block_device_info)
+ block_device_info=block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
@@ -338,7 +348,7 @@ class VMOps(object):
vdis, disk_image_type, kernel_file, ramdisk_file)
def undo_create_vm():
- self._destroy(instance, vm_ref, network_info)
+ self._destroy(instance, vm_ref, network_info=network_info)
undo_mgr.undo_with(undo_create_vm)
return vm_ref
@@ -503,8 +513,9 @@ class VMOps(object):
ctx = nova_context.get_admin_context()
instance_type = instance['instance_type']
- # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
+ # Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
+ # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
LOG.debug(_("Detected ISO image type, creating blank VM "
"for install"), instance=instance)
@@ -532,6 +543,19 @@ class VMOps(object):
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
+ # Attach (optional) additional block-devices
+ for type_, vdi_info in vdis.items():
+            # Additional block devices use their device name as their
+            # type; the root disk was already attached above.
+ if not type_.startswith('/dev'):
+ continue
+
+ # Convert device name to userdevice number, e.g. /dev/xvdb -> 1
+ userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
+ vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
+ userdevice, bootable=False,
+ osvol=vdi_info.get('osvol'))
+
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
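The device-name conversion above assumes `block_device.strip_prefix` reduces a
path like `/dev/xvdb` to its trailing drive letter, giving zero-based
userdevice numbers:

    # /dev/xvda -> 'a' -> 0, /dev/xvdb -> 'b' -> 1, /dev/xvdc -> 'c' -> 2
    userdevice = ord(block_device.strip_prefix('/dev/xvdb')) - ord('a')  # 1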
@@ -661,9 +685,11 @@ class VMOps(object):
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
- 3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
- that will bundle the VHDs together and then push the bundle into
- Glance.
+ 3. Push-to-data-store: Once coalesced, we call a plugin on the
+ XenServer that will bundle the VHDs together and then push the
+ bundle. Depending on the configured value of
+ 'xenapi_image_upload_handler', image data may be pushed to
+ Glance or the specified data store.
"""
vm_ref = self._get_vm_opaque_ref(instance)
@@ -674,8 +700,11 @@ class VMOps(object):
update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
- vm_utils.upload_image(
- context, self._session, instance, vdi_uuids, image_id)
+ self.image_upload_handler.upload_image(context,
+ self._session,
+ instance,
+ vdi_uuids,
+ image_id)
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
@@ -998,29 +1027,7 @@ class VMOps(object):
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
- def _detach_vm_vols(self, instance, vm_ref, block_device_info=None):
- """Detach any external nova/cinder volumes and purge the SRs.
- This differs from a normal detach in that the VM has been
- shutdown, so there is no need for unplugging VBDs. They do
- need to be destroyed, so that the SR can be forgotten.
- """
- vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
- for vbd_ref in vbd_refs:
- other_config = self._session.call_xenapi("VBD.get_other_config",
- vbd_ref)
- if other_config.get('osvol'):
- # this is a nova/cinder volume
- try:
- sr_ref = volume_utils.find_sr_from_vbd(self._session,
- vbd_ref)
- vm_utils.destroy_vbd(self._session, vbd_ref)
- # Forget SR only if not in use
- volume_utils.purge_sr(self._session, sr_ref)
- except Exception as exc:
- LOG.exception(exc)
- raise
-
- def _destroy_vdis(self, instance, vm_ref, block_device_info=None):
+ def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
@@ -1102,12 +1109,14 @@ class VMOps(object):
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
- return self._destroy(instance, vm_ref, network_info,
- block_device_info=block_device_info,
+ # NOTE(sirp): `block_device_info` is not used, information about which
+ # volumes should be detached is determined by the
+ # VBD.other_config['osvol'] attribute
+ return self._destroy(instance, vm_ref, network_info=network_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
- block_device_info=None, destroy_disks=True):
+ destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
@@ -1123,10 +1132,9 @@ class VMOps(object):
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
- # Destroy VDIs (if necessary)
if destroy_disks:
- self._detach_vm_vols(instance, vm_ref, block_device_info)
- self._destroy_vdis(instance, vm_ref, block_device_info)
+ self._volumeops.detach_all(vm_ref)
+ self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index c2d717cfd..0c8a9e1c7 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -115,39 +115,48 @@ class VolumeOps(object):
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
-
- vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
-
- # Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
- device_number = volume_utils.get_device_number(mountpoint)
- try:
- vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
- device_number)
- sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Unable to locate volume %s') % mountpoint)
-
- try:
- if not vm_utils._is_vm_shutdown(self._session, vm_ref):
- vm_utils.unplug_vbd(self._session, vbd_ref)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Unable to detach volume %s') % mountpoint)
- try:
- vm_utils.destroy_vbd(self._session, vbd_ref)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Unable to destroy vbd %s') % mountpoint)
- # Forget SR only if no other volumes on this host are using it
- try:
- volume_utils.purge_sr(self._session, sr_ref)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Error purging SR %s') % sr_ref)
+ device_number = volume_utils.get_device_number(mountpoint)
+ vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
- LOG.info(_('Mountpoint %(mountpoint)s detached from'
- ' instance %(instance_name)s') % locals())
+ vbd_ref = vm_utils.find_vbd_by_number(
+ self._session, vm_ref, device_number)
+
+        # Unplug the VBD only if the VM is NOT shut down
+ unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
+ self._detach_vbd(vbd_ref, unplug=unplug)
+
+ LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
+ ' %(instance_name)s') % locals())
+
+ def _get_all_volume_vbd_refs(self, vm_ref):
+ """Return VBD refs for all Nova/Cinder volumes."""
+ vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
+ for vbd_ref in vbd_refs:
+ other_config = self._session.call_xenapi(
+ "VBD.get_other_config", vbd_ref)
+ if other_config.get('osvol'):
+ yield vbd_ref
+
+ def _detach_vbd(self, vbd_ref, unplug=False):
+ if unplug:
+ vm_utils.unplug_vbd(self._session, vbd_ref)
+
+ sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
+ vm_utils.destroy_vbd(self._session, vbd_ref)
+
+ # Forget SR only if not in use
+ volume_utils.purge_sr(self._session, sr_ref)
+
+ def detach_all(self, vm_ref):
+ """Detach any external nova/cinder volumes and purge the SRs."""
+        # Generally speaking, detach_all will be called with the VM already
+        # shut down; however, if it's still running, we can still perform the
+        # operation by unplugging each VBD first.
+ unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
+
+ vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
+ for vbd_ref in vbd_refs:
+ self._detach_vbd(vbd_ref, unplug=unplug)
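A sketch of the consolidated teardown path (object construction illustrative):
instance destroy in vmops.py now funnels all volume cleanup through this one
helper instead of the removed `_detach_vm_vols`:

    # Detach every nova/cinder volume (VBDs tagged other_config['osvol'])
    # and purge the now-unused SRs, whether or not the VM is still running.
    volume_ops = VolumeOps(session)
    volume_ops.detach_all(vm_ref)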
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index daca69854..05918f83d 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -63,8 +63,10 @@ def cinderclient(context):
# FIXME: the cinderclient ServiceCatalog object is mis-named.
# It actually contains the entire access blob.
+    # Only the needed parts of the service catalog are passed in; see
+    # nova/context.py.
compat_catalog = {
- 'access': {'serviceCatalog': context.service_catalog or {}}
+ 'access': {'serviceCatalog': context.service_catalog or []}
}
sc = service_catalog.ServiceCatalog(compat_catalog)
if CONF.cinder_endpoint_template:
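The fix above only changes the empty-catalog default from `{}` to `[]`, since
a service catalog is a list of endpoint dicts; the compatibility blob handed
to cinderclient keeps the same shape either way:

    # With no catalog on the context, cinderclient now receives an empty
    # list -- the type it expects -- instead of an empty dict:
    compat_catalog = {'access': {'serviceCatalog': []}}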
diff --git a/openstack-common.conf b/openstack-common.conf
index a0b14e651..fcff378b5 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -1,7 +1,7 @@
[DEFAULT]
# The list of modules to copy from openstack-common
-modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils
+modules=cfg,cliutils,context,db,db.sqlalchemy,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils,install_venv_common,flakes,version
# The base module to hold the copy of openstack.common
base=nova
diff --git a/run_tests.sh b/run_tests.sh
index 11bc8b518..be9b0fa73 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -6,17 +6,23 @@ function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Nova's test suite(s)"
echo ""
- echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
- echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
- echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
- echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
- echo " -n, --no-recreate-db Don't recreate the test database."
- echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
- echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
- echo " -P, --no-pep8 Don't run static code checks"
- echo " -c, --coverage Generate coverage report"
- echo " -h, --help Print this usage message"
- echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
+ echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
+ echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
+ echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
+ echo " -n, --no-recreate-db Don't recreate the test database."
+ echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
+ echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
+ echo " -P, --no-pep8 Don't run static code checks"
+ echo " -c, --coverage Generate coverage report"
+ echo " -h, --help Print this usage message"
+ echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
+ echo " --virtual-env-path <path> Location of the virtualenv directory"
+ echo " Default: \$(pwd)"
+ echo " --virtual-env-name <name> Name of the virtualenv directory"
+ echo " Default: .venv"
+ echo " --tools-path <dir> Location of the tools directory"
+ echo " Default: \$(pwd)"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
@@ -24,24 +30,43 @@ function usage {
exit
}
-function process_option {
- case "$1" in
- -h|--help) usage;;
- -V|--virtual-env) always_venv=1; never_venv=0;;
- -N|--no-virtual-env) always_venv=0; never_venv=1;;
- -s|--no-site-packages) no_site_packages=1;;
- -r|--recreate-db) recreate_db=1;;
- -n|--no-recreate-db) recreate_db=0;;
- -f|--force) force=1;;
- -p|--pep8) just_pep8=1;;
- -P|--no-pep8) no_pep8=1;;
- -c|--coverage) coverage=1;;
- -*) testropts="$testropts $1";;
- *) testrargs="$testrargs $1"
- esac
+function process_options {
+ i=1
+ while [ $i -le $# ]; do
+ case "${!i}" in
+ -h|--help) usage;;
+ -V|--virtual-env) always_venv=1; never_venv=0;;
+ -N|--no-virtual-env) always_venv=0; never_venv=1;;
+ -s|--no-site-packages) no_site_packages=1;;
+ -r|--recreate-db) recreate_db=1;;
+ -n|--no-recreate-db) recreate_db=0;;
+ -f|--force) force=1;;
+ -p|--pep8) just_pep8=1;;
+ -P|--no-pep8) no_pep8=1;;
+ -c|--coverage) coverage=1;;
+ --virtual-env-path)
+ (( i++ ))
+ venv_path=${!i}
+ ;;
+ --virtual-env-name)
+ (( i++ ))
+ venv_dir=${!i}
+ ;;
+ --tools-path)
+ (( i++ ))
+ tools_path=${!i}
+ ;;
+ -*) testropts="$testropts $1";;
+ *) testrargs="$testrargs $1"
+ esac
+ (( i++ ))
+ done
}
-venv=.venv
+tools_path=${tools_path:-$(pwd)}
+venv_path=${venv_path:-$(pwd)}
+venv_dir=${venv_name:-.venv}
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
@@ -60,9 +85,13 @@ LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
-for arg in "$@"; do
- process_option $arg
-done
+process_options "$@"
+# Make our paths available to other scripts we call
+export venv_path
+export venv_dir
+export venv_name
+export tools_path
+export venv=${venv_path}/${venv_dir}
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
@@ -91,9 +120,9 @@ function run_tests {
# Just run the test suites in current environment
set +e
testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
- TESTRTESTS="$TESTRTESTS --testr-args='$testrargs'"
+ TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testropts $testrargs'"
echo "Running \`${wrapper} $TESTRTESTS\`"
- bash -c "${wrapper} $TESTRTESTS"
+ bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit"
RESULT=$?
set -e
@@ -142,6 +171,7 @@ function run_pep8 {
echo "Running pep8"
${wrapper} python tools/hacking.py ${ignore} ${srcfiles}
+ ${wrapper} bash tools/unused_imports.sh
# NOTE(sdague): as of grizzly-2 these are passing however leaving the comment
# in here in case we need to break it out when we get more of our hacking working
# again.
@@ -154,7 +184,7 @@ function run_pep8 {
}
-TESTRTESTS="python setup.py testr $testropts"
+TESTRTESTS="python setup.py testr"
if [ $never_venv -eq 0 ]
then
diff --git a/setup.py b/setup.py
index 78c9062c2..54d62e409 100644
--- a/setup.py
+++ b/setup.py
@@ -18,12 +18,13 @@
import setuptools
from nova.openstack.common import setup as common_setup
-from nova import version
requires = common_setup.parse_requirements()
+project = 'nova'
-setuptools.setup(name='nova',
- version=version.canonical_version_string(),
+setuptools.setup(
+ name=project,
+ version=common_setup.get_version(project, '2013.1'),
description='cloud computing fabric controller',
author='OpenStack',
author_email='nova@lists.launchpad.net',
diff --git a/smoketests/base.py b/smoketests/base.py
index 7c7d19838..c90da102c 100644
--- a/smoketests/base.py
+++ b/smoketests/base.py
@@ -169,7 +169,6 @@ class SmokeTestCase(unittest.TestCase):
cmd += ' --kernel true'
status, output = commands.getstatusoutput(cmd)
if status != 0:
- print '%s -> \n %s' % (cmd, output)
raise Exception(output)
return True
@@ -178,7 +177,6 @@ class SmokeTestCase(unittest.TestCase):
cmd += '%s -m %s/%s.manifest.xml' % (bucket_name, tempdir, image)
status, output = commands.getstatusoutput(cmd)
if status != 0:
- print '%s -> \n %s' % (cmd, output)
raise Exception(output)
return True
@@ -186,7 +184,6 @@ class SmokeTestCase(unittest.TestCase):
cmd = 'euca-delete-bundle --clear -b %s' % (bucket_name)
status, output = commands.getstatusoutput(cmd)
if status != 0:
- print '%s -> \n%s' % (cmd, output)
raise Exception(output)
return True
diff --git a/smoketests/public_network_smoketests.py b/smoketests/public_network_smoketests.py
index 4fb843e0f..f20b0923e 100644
--- a/smoketests/public_network_smoketests.py
+++ b/smoketests/public_network_smoketests.py
@@ -97,7 +97,6 @@ class InstanceTestsFromPublic(base.UserSmokeTestCase):
self.data['ip_v6'], TEST_KEY)
conn.close()
except Exception as ex:
- print ex
time.sleep(1)
else:
break
diff --git a/tools/flakes.py b/tools/flakes.py
index 4b93abc21..7f96116ca 100644
--- a/tools/flakes.py
+++ b/tools/flakes.py
@@ -2,21 +2,14 @@
wrapper for pyflakes to ignore gettext based warning:
"undefined name '_'"
- From https://bugs.launchpad.net/pyflakes/+bug/844592
+ Synced in from openstack-common
"""
-import __builtin__
-import os
import sys
-from pyflakes.scripts.pyflakes import main
+import pyflakes.checker
+from pyflakes.scripts import pyflakes
if __name__ == "__main__":
- names = os.environ.get('PYFLAKES_BUILTINS', '_')
- names = [x.strip() for x in names.split(',')]
- for x in names:
- if not hasattr(__builtin__, x):
- setattr(__builtin__, x, True)
-
- del names, os, __builtin__
-
- sys.exit(main())
+ orig_builtins = set(pyflakes.checker._MAGIC_GLOBALS)
+ pyflakes.checker._MAGIC_GLOBALS = orig_builtins | set(['_'])
+ sys.exit(pyflakes.main())
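For reviewers unfamiliar with pyflakes internals: names in `_MAGIC_GLOBALS`
are treated as always-defined, so a hypothetical file like the one below is
clean under `tools/flakes.py`, while plain pyflakes reports
"undefined name '_'":

    # demo.py (hypothetical)
    print _("this gettext-style call no longer trips pyflakes")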
diff --git a/tools/hacking.py b/tools/hacking.py
index 801a87899..42a644e7d 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -43,6 +43,7 @@ logging.disable('LOG')
#N6xx calling methods
#N7xx localization
#N8xx git commit messages
+#N9xx other
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"]
@@ -493,6 +494,23 @@ def nova_localization_strings(logical_line, tokens):
#TODO(jogo) Dict and list objects
+
+def nova_not_in(logical_line):
+ r"""Check localization in line.
+
+ Okay: if x not in y
+ Okay: if not (X in Y or X is Z)
+ Okay: if not (X in Y)
+ N901: if not X in Y
+ N901: if not X.B in Y
+ """
+ split_line = logical_line.split()
+ if (len(split_line) == 5 and split_line[0] == 'if' and
+ split_line[1] == 'not' and split_line[3] == 'in' and not
+ split_line[2].startswith('(')):
+ yield (logical_line.find('not'), "N901: Use the 'not in' "
+ "operator for collection membership evaluation")
+
current_file = ""
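A quick sanity check of the new N901 rule (the check is a standard pep8-style
logical-line generator, so it can be exercised directly):

    # Lines the docstring marks N901 yield an offence tuple; compliant
    # membership tests yield nothing.
    assert list(nova_not_in("if not X in Y"))       # flagged
    assert not list(nova_not_in("if x not in y"))   # clean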
diff --git a/tools/install_venv.py b/tools/install_venv.py
index b1ceb74f0..17a0be205 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -1,4 +1,3 @@
-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -6,6 +5,7 @@
# All Rights Reserved.
#
# Copyright 2010 OpenStack, LLC
+# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -19,189 +19,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Installation script for Nova's development virtualenv."""
-
-import optparse
import os
-import subprocess
import sys
+import install_venv_common as install_venv
-ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-VENV = os.path.join(ROOT, '.venv')
-PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
-TEST_REQUIRES = os.path.join(ROOT, 'tools', 'test-requires')
-PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
-
-
-def die(message, *args):
- print >> sys.stderr, message % args
- sys.exit(1)
-
-
-def check_python_version():
- if sys.version_info < (2, 6):
- die("Need Python Version >= 2.6")
-
-
-def run_command_with_code(cmd, redirect_output=True, check_exit_code=True):
- """Runs a command in an out-of-process shell.
-
- Returns the output of that command. Working directory is ROOT.
- """
- if redirect_output:
- stdout = subprocess.PIPE
- else:
- stdout = None
-
- proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
- output = proc.communicate()[0]
- if check_exit_code and proc.returncode != 0:
- die('Command "%s" failed.\n%s', ' '.join(cmd), output)
- return (output, proc.returncode)
-
-
-def run_command(cmd, redirect_output=True, check_exit_code=True):
- return run_command_with_code(cmd, redirect_output, check_exit_code)[0]
-
-
-class Distro(object):
-
- def check_cmd(self, cmd):
- return bool(run_command(['which', cmd], check_exit_code=False).strip())
-
- def install_virtualenv(self):
- if self.check_cmd('virtualenv'):
- return
-
- if self.check_cmd('easy_install'):
- print 'Installing virtualenv via easy_install...',
- if run_command(['easy_install', 'virtualenv']):
- print 'Succeeded'
- return
- else:
- print 'Failed'
-
- die('ERROR: virtualenv not found.\n\nNova development'
- ' requires virtualenv, please install it using your'
- ' favorite package management tool')
-
- def post_process(self):
- """Any distribution-specific post-processing gets done here.
-
- In particular, this is useful for applying patches to code inside
- the venv.
- """
- pass
-
-
-class Fedora(Distro):
- """This covers all Fedora-based distributions.
-
- Includes: Fedora, RHEL, CentOS, Scientific Linux"""
-
- def check_pkg(self, pkg):
- return run_command_with_code(['rpm', '-q', pkg],
- check_exit_code=False)[1] == 0
-
- def yum_install(self, pkg, **kwargs):
- print "Attempting to install '%s' via yum" % pkg
- run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs)
-
- def apply_patch(self, originalfile, patchfile):
- run_command(['patch', originalfile, patchfile])
-
- def install_virtualenv(self):
- if self.check_cmd('virtualenv'):
- return
- if not self.check_pkg('python-virtualenv'):
- self.yum_install('python-virtualenv', check_exit_code=False)
-
- super(Fedora, self).install_virtualenv()
-
- def post_process(self):
- """Workaround for a bug in eventlet.
-
- This currently affects RHEL6.1, but the fix can safely be
- applied to all RHEL and Fedora distributions.
-
- This can be removed when the fix is applied upstream.
-
- Nova: https://bugs.launchpad.net/nova/+bug/884915
- Upstream: https://bitbucket.org/which_linden/eventlet/issue/89
- """
-
- # Install "patch" program if it's not there
- if not self.check_pkg('patch'):
- self.yum_install('patch')
-
- # Apply the eventlet patch
- self.apply_patch(os.path.join(VENV, 'lib', PY_VERSION, 'site-packages',
- 'eventlet/green/subprocess.py'),
- 'contrib/redhat-eventlet.patch')
-
-
-def get_distro():
- if (os.path.exists('/etc/fedora-release') or
- os.path.exists('/etc/redhat-release')):
- return Fedora()
- else:
- return Distro()
-
-
-def check_dependencies():
- get_distro().install_virtualenv()
-
-
-def create_virtualenv(venv=VENV, no_site_packages=True):
- """Creates the virtual environment and installs PIP.
-
- Creates the virtual environment and installs PIP only into the
- virtual environment.
- """
- print 'Creating venv...',
- if no_site_packages:
- run_command(['virtualenv', '-q', '--no-site-packages', VENV])
- else:
- run_command(['virtualenv', '-q', VENV])
- print 'done.'
- print 'Installing pip in virtualenv...',
- if not run_command(['tools/with_venv.sh', 'easy_install',
- 'pip>1.0']).strip():
- die("Failed to install pip.")
- print 'done.'
-
-
-def pip_install(*args):
- run_command(['tools/with_venv.sh',
- 'pip', 'install', '--upgrade'] + list(args),
- redirect_output=False)
-
-
-def install_dependencies(venv=VENV):
- print 'Installing dependencies with pip (this can take a while)...'
-
- # First things first, make sure our venv has the latest pip and distribute.
- # NOTE: we keep pip at version 1.1 since the most recent version causes
- # the .venv creation to fail. See:
- # https://bugs.launchpad.net/nova/+bug/1047120
- pip_install('pip==1.1')
- pip_install('distribute')
-
- # Install greenlet by hand - just listing it in the requires file does not
- # get it in stalled in the right order
- pip_install('greenlet')
-
- pip_install('-r', PIP_REQUIRES)
- pip_install('-r', TEST_REQUIRES)
-
-
-def post_process():
- get_distro().post_process()
-
-
-def print_help():
+def print_help(venv, root):
help = """
Nova development environment setup is complete.
@@ -211,35 +35,40 @@ def print_help():
To activate the Nova virtualenv for the extent of your current shell
session you can run:
- $ source .venv/bin/activate
+ $ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
- $ tools/with_venv.sh <your command>
+ $ %s/tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
- print help
-
-
-def parse_args():
- """Parses command-line arguments."""
- parser = optparse.OptionParser()
- parser.add_option("-n", "--no-site-packages", dest="no_site_packages",
- default=False, action="store_true",
- help="Do not inherit packages from global Python install")
- return parser.parse_args()
+ print help % (venv, root)
def main(argv):
- (options, args) = parse_args()
- check_python_version()
- check_dependencies()
- create_virtualenv(no_site_packages=options.no_site_packages)
- install_dependencies()
- post_process()
- print_help()
+ root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+
+ if os.environ.get('tools_path'):
+ root = os.environ['tools_path']
+ venv = os.path.join(root, '.venv')
+ if os.environ.get('venv'):
+ venv = os.environ['venv']
+
+ pip_requires = os.path.join(root, 'tools', 'pip-requires')
+ test_requires = os.path.join(root, 'tools', 'test-requires')
+ py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
+ project = 'Nova'
+ install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
+ py_version, project)
+ options = install.parse_args(argv)
+ install.check_python_version()
+ install.check_dependencies()
+ install.create_virtualenv(no_site_packages=options.no_site_packages)
+ install.install_dependencies()
+ install.post_process()
+ print_help(venv, root)
if __name__ == '__main__':
main(sys.argv)
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
new file mode 100644
index 000000000..3d47b12b2
--- /dev/null
+++ b/tools/install_venv_common.py
@@ -0,0 +1,232 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack, LLC
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Provides methods needed by installation script for OpenStack development
+virtual environments.
+
+Synced in from openstack-common
+"""
+
+import os
+import subprocess
+import sys
+
+
+possible_topdir = os.getcwd()
+if os.path.exists(os.path.join(possible_topdir, "nova",
+ "__init__.py")):
+ sys.path.insert(0, possible_topdir)
+
+
+from nova.openstack.common import cfg
+
+
+class InstallVenv(object):
+
+ def __init__(self, root, venv, pip_requires, test_requires, py_version,
+ project):
+ self.root = root
+ self.venv = venv
+ self.pip_requires = pip_requires
+ self.test_requires = test_requires
+ self.py_version = py_version
+ self.project = project
+
+ def die(self, message, *args):
+ print >> sys.stderr, message % args
+ sys.exit(1)
+
+ def check_python_version(self):
+ if sys.version_info < (2, 6):
+ self.die("Need Python Version >= 2.6")
+
+ def run_command_with_code(self, cmd, redirect_output=True,
+ check_exit_code=True):
+ """Runs a command in an out-of-process shell.
+
+ Returns the output of that command. Working directory is ROOT.
+ """
+ if redirect_output:
+ stdout = subprocess.PIPE
+ else:
+ stdout = None
+
+ proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
+ output = proc.communicate()[0]
+ if check_exit_code and proc.returncode != 0:
+ self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
+ return (output, proc.returncode)
+
+ def run_command(self, cmd, redirect_output=True, check_exit_code=True):
+ return self.run_command_with_code(cmd, redirect_output,
+ check_exit_code)[0]
+
+ def get_distro(self):
+ if (os.path.exists('/etc/fedora-release') or
+ os.path.exists('/etc/redhat-release')):
+ return Fedora(self.root, self.venv, self.pip_requires,
+ self.test_requires, self.py_version, self.project)
+ else:
+ return Distro(self.root, self.venv, self.pip_requires,
+ self.test_requires, self.py_version, self.project)
+
+ def check_dependencies(self):
+ self.get_distro().install_virtualenv()
+
+ def create_virtualenv(self, no_site_packages=True):
+ """Creates the virtual environment and installs PIP.
+
+ Creates the virtual environment and installs PIP only into the
+ virtual environment.
+ """
+ if not os.path.isdir(self.venv):
+ print 'Creating venv...',
+ if no_site_packages:
+ self.run_command(['virtualenv', '-q', '--no-site-packages',
+ self.venv])
+ else:
+ self.run_command(['virtualenv', '-q', self.venv])
+ print 'done.'
+ print 'Installing pip in virtualenv...',
+ if not self.run_command(['tools/with_venv.sh', 'easy_install',
+ 'pip>1.0']).strip():
+ self.die("Failed to install pip.")
+ print 'done.'
+ else:
+ print "venv already exists..."
+ pass
+
+ def pip_install(self, *args):
+ self.run_command(['tools/with_venv.sh',
+ 'pip', 'install', '--upgrade'] + list(args),
+ redirect_output=False)
+
+ def install_dependencies(self):
+ print 'Installing dependencies with pip (this can take a while)...'
+
+ # First things first, make sure our venv has the latest pip and
+ # distribute.
+ # NOTE: we keep pip at version 1.1 since the most recent version causes
+ # the .venv creation to fail. See:
+ # https://bugs.launchpad.net/nova/+bug/1047120
+ self.pip_install('pip==1.1')
+ self.pip_install('distribute')
+
+        # Install greenlet by hand - just listing it in the requires file
+        # does not get it installed in the right order
+ self.pip_install('greenlet')
+
+ self.pip_install('-r', self.pip_requires)
+ self.pip_install('-r', self.test_requires)
+
+ def post_process(self):
+ self.get_distro().post_process()
+
+ def parse_args(self, argv):
+ """Parses command-line arguments."""
+ cli_opts = [
+ cfg.BoolOpt('no-site-packages',
+ default=False,
+ short='n',
+ help="Do not inherit packages from global Python"
+                             " install"),
+ ]
+ CLI = cfg.ConfigOpts()
+ CLI.register_cli_opts(cli_opts)
+ CLI(argv[1:])
+ return CLI
+
+
+class Distro(InstallVenv):
+
+ def check_cmd(self, cmd):
+ return bool(self.run_command(['which', cmd],
+ check_exit_code=False).strip())
+
+ def install_virtualenv(self):
+ if self.check_cmd('virtualenv'):
+ return
+
+ if self.check_cmd('easy_install'):
+ print 'Installing virtualenv via easy_install...',
+ if self.run_command(['easy_install', 'virtualenv']):
+ print 'Succeeded'
+ return
+ else:
+ print 'Failed'
+
+ self.die('ERROR: virtualenv not found.\n\n%s development'
+ ' requires virtualenv, please install it using your'
+ ' favorite package management tool' % self.project)
+
+ def post_process(self):
+ """Any distribution-specific post-processing gets done here.
+
+ In particular, this is useful for applying patches to code inside
+ the venv.
+ """
+ pass
+
+
+class Fedora(Distro):
+ """This covers all Fedora-based distributions.
+
+ Includes: Fedora, RHEL, CentOS, Scientific Linux
+ """
+
+ def check_pkg(self, pkg):
+ return self.run_command_with_code(['rpm', '-q', pkg],
+ check_exit_code=False)[1] == 0
+
+ def yum_install(self, pkg, **kwargs):
+ print "Attempting to install '%s' via yum" % pkg
+ self.run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs)
+
+ def apply_patch(self, originalfile, patchfile):
+ self.run_command(['patch', originalfile, patchfile])
+
+ def install_virtualenv(self):
+ if self.check_cmd('virtualenv'):
+ return
+
+ if not self.check_pkg('python-virtualenv'):
+ self.yum_install('python-virtualenv', check_exit_code=False)
+
+ super(Fedora, self).install_virtualenv()
+
+ def post_process(self):
+ """Workaround for a bug in eventlet.
+
+ This currently affects RHEL6.1, but the fix can safely be
+ applied to all RHEL and Fedora distributions.
+
+ This can be removed when the fix is applied upstream.
+
+ Nova: https://bugs.launchpad.net/nova/+bug/884915
+ Upstream: https://bitbucket.org/which_linden/eventlet/issue/89
+ """
+
+ # Install "patch" program if it's not there
+ if not self.check_pkg('patch'):
+ self.yum_install('patch')
+
+ # Apply the eventlet patch
+ self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
+ 'site-packages',
+ 'eventlet/green/subprocess.py'),
+ 'contrib/redhat-eventlet.patch')
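For orientation, the sequence a project's `tools/install_venv.py` is expected
to drive against this class (paths illustrative), matching the rewritten Nova
script above:

    import sys

    install = InstallVenv('/src/nova', '/src/nova/.venv',
                          '/src/nova/tools/pip-requires',
                          '/src/nova/tools/test-requires',
                          'python2.7', 'Nova')
    options = install.parse_args(sys.argv)
    install.check_python_version()
    install.check_dependencies()
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()
    install.post_process()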
diff --git a/tools/pip-requires b/tools/pip-requires
index 231d5cfe5..d7e48ff87 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -16,11 +16,12 @@ sqlalchemy-migrate>=0.7.2
netaddr
suds==0.4
paramiko
+pyasn1
Babel>=0.9.6
iso8601>=0.1.4
httplib2
setuptools_git>=0.4
-python-cinderclient
+python-cinderclient>=1.0.1
python-quantumclient>=2.1
python-glanceclient>=0.5.0,<2
python-keystoneclient>=0.2.0
diff --git a/tools/regression_tester.py b/tools/regression_tester.py
new file mode 100755
index 000000000..554260fbd
--- /dev/null
+++ b/tools/regression_tester.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+"""Tool for checking if patch contains a regression test.
+
+Pass in gerrit review number as parameter, tool will download branch and run
+modified tests without bug fix.
+"""
+
+import string
+import subprocess
+import sys
+
+#TODO(jogo) use a proper option parser (optparse)
+gerrit_number = sys.argv[1]
+
+
+def run(cmd, fail_ok=False):
+ print "running: %s" % cmd
+ try:
+ rval = subprocess.check_output(cmd, shell=True)
+    except subprocess.CalledProcessError as e:
+        if not fail_ok:
+            print "the above command terminated with an error"
+            sys.exit(1)
+        # Capture the failing command's output so `rval` is bound on the
+        # fail_ok path (the caller inspects it for expected test failures).
+        rval = e.output
+ return rval
+
+
+test_works = False
+
+original_branch = run("git rev-parse --abbrev-ref HEAD")
+run("git review -d %s" % gerrit_number)
+# run new tests with old code
+run("git checkout HEAD^ nova")
+run("git checkout HEAD nova/tests")
+
+# identify which tests have changed
+tests = run("git whatchanged --format=oneline -1 | grep \"nova/tests\" "
+ "| cut -f2").split()
+test_list = []
+for test in tests:
+ test_list.append(string.replace(test[0:-3], '/', '.'))
+
+# run new tests, expect them to fail
+expect_failure = run(("tox -epy27 %s 2>&1" % string.join(test_list)),
+ fail_ok=True)
+if "FAILED (id=" in expect_failure:
+ test_works = True
+
+# cleanup
+run("git checkout HEAD nova")
+new_branch = run("git status | head -1 | cut -d ' ' -f 4")
+run("git checkout %s" % original_branch)
+run("git branch -D %s" % new_branch)
+
+
+if test_works:
+ print expect_failure
+ print ""
+ print "*******************************"
+ print "SUCCESS: test covers regression"
+else:
+ print expect_failure
+ print ""
+ print "***************************************"
+ print "FAILURE: test does not cover regression"
+ sys.exit(1)
diff --git a/tools/test-requires b/tools/test-requires
index fce1bc8f1..49ee52809 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -7,6 +7,7 @@ feedparser
fixtures>=0.3.12
mox==0.5.3
MySQL-python
+psycopg2
pep8==1.3.3
pylint==0.25.2
python-subunit
diff --git a/tools/unused_imports.sh b/tools/unused_imports.sh
new file mode 100755
index 000000000..0e0294517
--- /dev/null
+++ b/tools/unused_imports.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+# snakefood's sfood-checker detects even more unused imports
+! pyflakes nova/ | grep "imported but unused"
diff --git a/tools/with_venv.sh b/tools/with_venv.sh
index 550c4774e..94e05c127 100755
--- a/tools/with_venv.sh
+++ b/tools/with_venv.sh
@@ -1,4 +1,7 @@
#!/bin/bash
-TOOLS=`dirname $0`
-VENV=$TOOLS/../.venv
-source $VENV/bin/activate && "$@"
+tools_path=${tools_path:-$(dirname $0)}
+venv_path=${venv_path:-${tools_path}}
+venv_dir=${venv_name:-/../.venv}
+TOOLS=${tools_path}
+VENV=${venv:-${venv_path}/${venv_dir}}
+source ${VENV}/bin/activate && "$@"
diff --git a/tox.ini b/tox.ini
index e98f30151..f54865601 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,13 +15,16 @@ sitepackages = True
downloadcache = ~/cache/pip
[testenv:pep8]
-deps=pep8==1.3.3
+deps=
+ pep8==1.3.3
+ pyflakes
commands =
python tools/hacking.py --doctest
python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
- --exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
+ --exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build .
python tools/hacking.py --ignore=E12,E711,E721,E712,N403,N404 --show-source \
--filename=nova* bin
+ bash tools/unused_imports.sh
[testenv:pylint]
setenv = VIRTUAL_ENV={envdir}